diff --git a/.gitignore b/.gitignore index 39723909c71..aa62a9fad71 100644 --- a/.gitignore +++ b/.gitignore @@ -37,6 +37,7 @@ _testmain.go /src/cmd/internal/objabi/zbootstrap.go /src/go/build/zcgo.go /src/go/doc/headscan +/src/internal/buildcfg/zbootstrap.go /src/runtime/internal/sys/zversion.go /src/unicode/maketables /test.out diff --git a/AUTHORS b/AUTHORS index 4828ba36cc6..48ce71f4cc6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -145,7 +145,7 @@ Andy Davis Andy Finkenstadt Andy Lindeman Andy Maloney -Andy Pan +Andy Pan Andy Walker Anfernee Yongkun Gui Angelo Bulfone @@ -195,7 +195,7 @@ Ayanamist Yang Aymerick Jéhanne Azat Kaumov Baiju Muthukadan -Baokun Lee +Baokun Lee Bartosz Grzybowski Bastian Ike Ben Burkert @@ -1425,6 +1425,7 @@ Wèi Cōngruì Wei Fu Wei Guangjing Weichao Tang +Weixie Cui <523516579@qq.com> Wembley G. Leach, Jr Will Faught Will Storey diff --git a/CONTRIBUTORS b/CONTRIBUTORS index ccbe4627f38..6c7262adb1f 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -240,7 +240,7 @@ Andy Davis Andy Finkenstadt Andy Lindeman Andy Maloney -Andy Pan +Andy Pan Andy Walker Andy Wang Andy Williams @@ -321,7 +321,7 @@ Azat Kaumov Baiju Muthukadan Balaram Makam Balazs Lecz -Baokun Lee +Baokun Lee Barnaby Keene Bartosz Grzybowski Bartosz Oler @@ -466,7 +466,7 @@ Charlotte Brandhorst-Satzkorn Chauncy Cullitan Chen Zhidong Chen Zhihan -Cherry Zhang +Cherry Mui Chew Choon Keat Chiawen Chen Chirag Sukhala @@ -2526,6 +2526,7 @@ Wei Guangjing Wei Xiao Wei Xikai Weichao Tang +Weixie Cui <523516579@qq.com> Wembley G. Leach, Jr Wenlei (Frank) He Wenzel Lowe diff --git a/api/except.txt b/api/except.txt index 6f6f839ba60..14fe7785fa5 100644 --- a/api/except.txt +++ b/api/except.txt @@ -1,4 +1,7 @@ pkg encoding/json, method (*RawMessage) MarshalJSON() ([]uint8, error) +pkg math, const MaxFloat64 = 1.79769e+308 // 179769313486231570814527423731704356798100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +pkg math, const SmallestNonzeroFloat32 = 1.4013e-45 // 17516230804060213386546619791123951641/12500000000000000000000000000000000000000000000000000000000000000000000000000000000 +pkg math, const SmallestNonzeroFloat64 = 4.94066e-324 // 4940656458412465441765687928682213723651/1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 pkg math/big, const MaxBase = 36 pkg math/big, type Word uintptr pkg net, func ListenUnixgram(string, *UnixAddr) (*UDPConn, error) diff --git a/api/next.txt b/api/next.txt index e69de29bb2d..9e996005c62 100644 --- a/api/next.txt +++ b/api/next.txt @@ -0,0 +1,99 @@ +pkg compress/lzw, method (*Reader) Close() error +pkg compress/lzw, method (*Reader) Read([]uint8) (int, error) +pkg compress/lzw, method (*Reader) Reset(io.Reader, Order, int) +pkg compress/lzw, method (*Writer) Close() error +pkg compress/lzw, method (*Writer) Reset(io.Writer, Order, int) +pkg compress/lzw, method (*Writer) Write([]uint8) (int, error) +pkg compress/lzw, type Reader struct +pkg compress/lzw, type Writer struct +pkg crypto/tls, method (*CertificateRequestInfo) Context() context.Context 
+pkg crypto/tls, method (*ClientHelloInfo) Context() context.Context +pkg crypto/tls, method (*Conn) HandshakeContext(context.Context) error +pkg debug/elf, const SHT_MIPS_ABIFLAGS = 1879048234 +pkg debug/elf, const SHT_MIPS_ABIFLAGS SectionType +pkg encoding/csv, method (*Reader) FieldPos(int) (int, int) +pkg go/ast, method (*FuncDecl) IsMethod() bool +pkg go/build, type Context struct, ToolTags []string +pkg go/parser, const SkipObjectResolution = 64 +pkg go/parser, const SkipObjectResolution Mode +pkg go/types, type Config struct, GoVersion string +pkg io/fs, func FileInfoToDirEntry(FileInfo) DirEntry +pkg net, method (*ParseError) Temporary() bool +pkg net, method (*ParseError) Timeout() bool +pkg net, method (IP) IsPrivate() bool +pkg reflect, func VisibleFields(Type) []StructField +pkg reflect, method (Method) IsExported() bool +pkg reflect, method (StructField) IsExported() bool +pkg runtime/cgo (darwin-amd64-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (darwin-amd64-cgo), method (Handle) Delete() +pkg runtime/cgo (darwin-amd64-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (darwin-amd64-cgo), type Handle uintptr +pkg runtime/cgo (freebsd-386-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (freebsd-386-cgo), method (Handle) Delete() +pkg runtime/cgo (freebsd-386-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (freebsd-386-cgo), type Handle uintptr +pkg runtime/cgo (freebsd-amd64-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (freebsd-amd64-cgo), method (Handle) Delete() +pkg runtime/cgo (freebsd-amd64-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (freebsd-amd64-cgo), type Handle uintptr +pkg runtime/cgo (freebsd-arm-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (freebsd-arm-cgo), method (Handle) Delete() +pkg runtime/cgo (freebsd-arm-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (freebsd-arm-cgo), type Handle uintptr +pkg runtime/cgo (linux-386-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (linux-386-cgo), method (Handle) Delete() +pkg runtime/cgo (linux-386-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (linux-386-cgo), type Handle uintptr +pkg runtime/cgo (linux-amd64-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (linux-amd64-cgo), method (Handle) Delete() +pkg runtime/cgo (linux-amd64-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (linux-amd64-cgo), type Handle uintptr +pkg runtime/cgo (linux-arm-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (linux-arm-cgo), method (Handle) Delete() +pkg runtime/cgo (linux-arm-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (linux-arm-cgo), type Handle uintptr +pkg runtime/cgo (netbsd-386-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (netbsd-386-cgo), method (Handle) Delete() +pkg runtime/cgo (netbsd-386-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (netbsd-386-cgo), type Handle uintptr +pkg runtime/cgo (netbsd-amd64-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (netbsd-amd64-cgo), method (Handle) Delete() +pkg runtime/cgo (netbsd-amd64-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (netbsd-amd64-cgo), type Handle uintptr +pkg runtime/cgo (netbsd-arm-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (netbsd-arm-cgo), method (Handle) Delete() +pkg runtime/cgo (netbsd-arm-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (netbsd-arm-cgo), type Handle uintptr +pkg runtime/cgo (netbsd-arm64-cgo), func 
NewHandle(interface{}) Handle +pkg runtime/cgo (netbsd-arm64-cgo), method (Handle) Delete() +pkg runtime/cgo (netbsd-arm64-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (netbsd-arm64-cgo), type Handle uintptr +pkg runtime/cgo (openbsd-386-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (openbsd-386-cgo), method (Handle) Delete() +pkg runtime/cgo (openbsd-386-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (openbsd-386-cgo), type Handle uintptr +pkg runtime/cgo (openbsd-amd64-cgo), func NewHandle(interface{}) Handle +pkg runtime/cgo (openbsd-amd64-cgo), method (Handle) Delete() +pkg runtime/cgo (openbsd-amd64-cgo), method (Handle) Value() interface{} +pkg runtime/cgo (openbsd-amd64-cgo), type Handle uintptr +pkg syscall (openbsd-386), const MSG_CMSG_CLOEXEC = 2048 +pkg syscall (openbsd-386), const MSG_CMSG_CLOEXEC ideal-int +pkg syscall (openbsd-386-cgo), const MSG_CMSG_CLOEXEC = 2048 +pkg syscall (openbsd-386-cgo), const MSG_CMSG_CLOEXEC ideal-int +pkg syscall (openbsd-amd64), const MSG_CMSG_CLOEXEC = 2048 +pkg syscall (openbsd-amd64), const MSG_CMSG_CLOEXEC ideal-int +pkg syscall (openbsd-amd64-cgo), const MSG_CMSG_CLOEXEC = 2048 +pkg syscall (openbsd-amd64-cgo), const MSG_CMSG_CLOEXEC ideal-int +pkg syscall (windows-386), type SysProcAttr struct, AdditionalInheritedHandles []Handle +pkg syscall (windows-386), type SysProcAttr struct, ParentProcess Handle +pkg syscall (windows-amd64), type SysProcAttr struct, AdditionalInheritedHandles []Handle +pkg syscall (windows-amd64), type SysProcAttr struct, ParentProcess Handle +pkg testing, method (*B) Setenv(string, string) +pkg testing, method (*T) Setenv(string, string) +pkg text/template/parse, const SkipFuncCheck = 2 +pkg text/template/parse, const SkipFuncCheck Mode +pkg time, func UnixMicro(int64) Time +pkg time, func UnixMilli(int64) Time +pkg time, method (*Time) IsDST() bool +pkg time, method (Time) UnixMicro() int64 +pkg time, method (Time) UnixMilli() int64 diff --git a/doc/go1.16.html b/doc/go1.16.html deleted file mode 100644 index 0beb62d160e..00000000000 --- a/doc/go1.16.html +++ /dev/null @@ -1,1220 +0,0 @@ - - - - - - -

Introduction to Go 1.16

- -

- The latest Go release, version 1.16, arrives six months after Go 1.15. - Most of its changes are in the implementation of the toolchain, runtime, and libraries. - As always, the release maintains the Go 1 promise of compatibility. - We expect almost all Go programs to continue to compile and run as before. -

- -

Changes to the language

- -

- There are no changes to the language. -

- -

Ports

- -

Darwin and iOS

- -

- Go 1.16 adds support of 64-bit ARM architecture on macOS (also known as - Apple Silicon) with GOOS=darwin, GOARCH=arm64. - Like the darwin/amd64 port, the darwin/arm64 - port supports cgo, internal and external linking, c-archive, - c-shared, and pie build modes, and the race - detector. -

- -

- The iOS port, which was previously darwin/arm64, has - been renamed to ios/arm64. GOOS=ios - implies the - darwin build tag, just as GOOS=android - implies the linux build tag. This change should be - transparent to anyone using gomobile to build iOS apps. -

- -

- Go 1.16 adds an ios/amd64 port, which targets the iOS - simulator running on AMD64-based macOS. Previously this was - unofficially supported through darwin/amd64 with - the ios build tag set. See also - misc/ios/README for - details about how to build programs for iOS and iOS simulator. -

- -

- Go 1.16 is the last release that will run on macOS 10.12 Sierra. - Go 1.17 will require macOS 10.13 High Sierra or later. -

- -

NetBSD

- -

- Go now supports the 64-bit ARM architecture on NetBSD (the - netbsd/arm64 port). -

- -

OpenBSD

- -

- Go now supports the MIPS64 architecture on OpenBSD - (the openbsd/mips64 port). This port does not yet - support cgo. -

- -

- On the 64-bit x86 and 64-bit ARM architectures on OpenBSD (the - openbsd/amd64 and openbsd/arm64 ports), system - calls are now made through libc, instead of directly using - the SYSCALL/SVC instruction. This ensures - forward-compatibility with future versions of OpenBSD. In particular, - OpenBSD 6.9 onwards will require system calls to be made through - libc for non-static Go binaries. -

- -

386

- -

- As announced in the Go 1.15 release notes, - Go 1.16 drops support for x87 mode compilation (GO386=387). - Support for non-SSE2 processors is now available using soft float - mode (GO386=softfloat). - Users running on non-SSE2 processors should replace GO386=387 - with GO386=softfloat. -

- -

RISC-V

- -

- The linux/riscv64 port now supports cgo and - -buildmode=pie. This release also includes performance - optimizations and code generation improvements for RISC-V. -

- -

Tools

- -

Go command

- -

Modules

- -

- Module-aware mode is enabled by default, regardless of whether a - go.mod file is present in the current working directory or a - parent directory. More precisely, the GO111MODULE environment - variable now defaults to on. To switch to the previous behavior, - set GO111MODULE to auto. -

- -

- Build commands like go build and go - test no longer modify go.mod and go.sum - by default. Instead, they report an error if a module requirement or checksum - needs to be added or updated (as if the -mod=readonly flag were - used). Module requirements and sums may be adjusted with go - mod tidy or go get. -

- -

- go install now accepts arguments with - version suffixes (for example, go install - example.com/cmd@v1.0.0). This causes go - install to build and install packages in module-aware mode, - ignoring the go.mod file in the current directory or any parent - directory, if there is one. This is useful for installing executables without - affecting the dependencies of the main module. -

- -

- go install, with or without a version suffix (as - described above), is now the recommended way to build and install packages in - module mode. go get should be used with the - -d flag to adjust the current module's dependencies without - building packages, and use of go get to build and - install packages is deprecated. In a future release, the -d flag - will always be enabled. -

- -

- retract directives may now be used in a go.mod file - to indicate that certain published versions of the module should not be used - by other modules. A module author may retract a version after a severe problem - is discovered or if the version was published unintentionally. -

- -

- The go mod vendor - and go mod tidy subcommands now accept - the -e flag, which instructs them to proceed despite errors in - resolving missing packages. -

- -

- The go command now ignores requirements on module versions - excluded by exclude directives in the main module. Previously, - the go command used the next version higher than an excluded - version, but that version could change over time, resulting in - non-reproducible builds. -

- -

- In module mode, the go command now disallows import paths that - include non-ASCII characters or path elements with a leading dot character - (.). Module paths with these characters were already disallowed - (see Module paths and versions), - so this change affects only paths within module subdirectories. -

- -

Embedding Files

- -

- The go command now supports including - static files and file trees as part of the final executable, - using the new //go:embed directive. - See the documentation for the new - embed - package for details. -

- -

go test

- -

- When using go test, a test that - calls os.Exit(0) during execution of a test function - will now be considered to fail. - This will help catch cases in which a test calls code that calls - os.Exit(0) and thereby stops running all future tests. - If a TestMain function calls os.Exit(0) - that is still considered to be a passing test. -

- -

- go test reports an error when the -c - or -i flags are used together with unknown flags. Normally, - unknown flags are passed to tests, but when -c or -i - are used, tests are not run. -

- -

go get

- -

- The go get -insecure flag is - deprecated and will be removed in a future version. This flag permits - fetching from repositories and resolving custom domains using insecure - schemes such as HTTP, and also bypasses module sum validation using the - checksum database. To permit the use of insecure schemes, use the - GOINSECURE environment variable instead. To bypass module - sum validation, use GOPRIVATE or GONOSUMDB. - See go help environment for details. -

- -

- go get example.com/mod@patch now - requires that some version of example.com/mod already be - required by the main module. - (However, go get -u=patch continues - to patch even newly-added dependencies.) -

- -

GOVCS environment variable

- -

- GOVCS is a new environment variable that limits which version - control tools the go command may use to download source code. - This mitigates security issues with tools that are typically used in trusted, - authenticated environments. By default, git and hg - may be used to download code from any repository. svn, - bzr, and fossil may only be used to download code - from repositories with module paths or package paths matching patterns in - the GOPRIVATE environment variable. See - go - help vcs for details. -

- -

The all pattern

- -

- When the main module's go.mod file - declares go 1.16 or higher, the all - package pattern now matches only those packages that are transitively imported - by a package or test found in the main module. (Packages imported by tests - of packages imported by the main module are no longer included.) This is - the same set of packages retained - by go mod vendor since Go 1.11. -

- -

The -toolexec build flag

- -

- When the -toolexec build flag is specified to use a program when - invoking toolchain programs like compile or asm, the environment variable - TOOLEXEC_IMPORTPATH is now set to the import path of the package - being built. -

- -

The -i build flag

- -

- The -i flag accepted by go build, - go install, and go test is - now deprecated. The -i flag instructs the go command - to install packages imported by packages named on the command line. Since - the build cache was introduced in Go 1.10, the -i flag no longer - has a significant effect on build times, and it causes errors when the install - directory is not writable. -

- -

The list command

- -

- When the -export flag is specified, the BuildID - field is now set to the build ID of the compiled package. This is equivalent - to running go tool buildid on - go list -exported -f {{.Export}}, - but without the extra step. -

- -

The -overlay flag

- -

- The -overlay flag specifies a JSON configuration file containing - a set of file path replacements. The -overlay flag may be used - with all build commands and go mod subcommands. - It is primarily intended to be used by editor tooling such as gopls to - understand the effects of unsaved changes to source files. The config file - maps actual file paths to replacement file paths and the go - command and its builds will run as if the actual file paths exist with the - contents given by the replacement file paths, or don't exist if the replacement - file paths are empty. -

- -

Cgo

- -

- The cgo tool will no longer try to translate - C struct bitfields into Go struct fields, even if their size can be - represented in Go. The order in which C bitfields appear in memory - is implementation dependent, so in some cases the cgo tool produced - results that were silently incorrect. -

- -

Vet

- -

New warning for invalid testing.T use in -goroutines

- -

- The vet tool now warns about invalid calls to the testing.T - method Fatal from within a goroutine created during the test. - This also warns on calls to Fatalf, FailNow, and - Skip{,f,Now} methods on testing.T tests or - testing.B benchmarks. -

- -

- Calls to these methods stop the execution of the created goroutine and not - the Test* or Benchmark* function. So these are - required to be called by the goroutine - running the test or benchmark function. For example: -

- -
-func TestFoo(t *testing.T) {
-    go func() {
-        if condition() {
-            t.Fatal("oops") // This exits the inner func instead of TestFoo.
-        }
-        ...
-    }()
-}
-
- -

- Code calling t.Fatal (or a similar method) from a created - goroutine should be rewritten to signal the test failure using - t.Error and exit the goroutine early using an alternative - method, such as using a return statement. The previous example - could be rewritten as: -

- -
-func TestFoo(t *testing.T) {
-    go func() {
-        if condition() {
-            t.Error("oops")
-            return
-        }
-        ...
-    }()
-}
-
- -

New warning for frame pointer

- -

- The vet tool now warns about amd64 assembly that clobbers the BP - register (the frame pointer) without saving and restoring it, - contrary to the calling convention. Code that doesn't preserve the - BP register must be modified to either not use BP at all or preserve - BP by saving and restoring it. An easy way to preserve BP is to set - the frame size to a nonzero value, which causes the generated - prologue and epilogue to preserve the BP register for you. - See CL 248260 for example - fixes. -

- -

New warning for asn1.Unmarshal

- -

- The vet tool now warns about incorrectly passing a non-pointer or nil argument to - asn1.Unmarshal. - This is like the existing checks for - encoding/json.Unmarshal - and encoding/xml.Unmarshal. -

- -

Runtime

- -

- The new runtime/metrics package - introduces a stable interface for reading - implementation-defined metrics from the Go runtime. - It supersedes existing functions like - runtime.ReadMemStats - and - debug.GCStats - and is significantly more general and efficient. - See the package documentation for more details. -

- -

- Setting the GODEBUG environment variable - to inittrace=1 now causes the runtime to emit a single - line to standard error for each package init, - summarizing its execution time and memory allocation. This trace can - be used to find bottlenecks or regressions in Go startup - performance. - The GODEBUG - documentation describes the format. -

- -

- On Linux, the runtime now defaults to releasing memory to the - operating system promptly (using MADV_DONTNEED), rather - than lazily when the operating system is under memory pressure - (using MADV_FREE). This means process-level memory - statistics like RSS will more accurately reflect the amount of - physical memory being used by Go processes. Systems that are - currently using GODEBUG=madvdontneed=1 to improve - memory monitoring behavior no longer need to set this environment - variable. -

- -

- Go 1.16 fixes a discrepancy between the race detector and - the Go memory model. The race detector now - more precisely follows the channel synchronization rules of the - memory model. As a result, the detector may now report races it - previously missed. -

- -

Compiler

- -

- The compiler can now inline functions with - non-labeled for loops, method values, and type - switches. The inliner can also detect more indirect calls where - inlining is possible. -

- -

Linker

- -

- This release includes additional improvements to the Go linker, - reducing linker resource usage (both time and memory) and improving - code robustness/maintainability. These changes form the second half - of a two-release project to - modernize the Go - linker. -

- -

- The linker changes in 1.16 extend the 1.15 improvements to all - supported architecture/OS combinations (the 1.15 performance improvements - were primarily focused on ELF-based OSes and - amd64 architectures). For a representative set of - large Go programs, linking is 20-25% faster than 1.15 and requires - 5-15% less memory on average for linux/amd64, with larger - improvements for other architectures and OSes. Most binaries are - also smaller as a result of more aggressive symbol pruning. -

- -

- On Windows, go build -buildmode=c-shared now generates Windows - ASLR DLLs by default. ASLR can be disabled with --ldflags=-aslr=false. -

- -

Core library

- -

Embedded Files

- -

- The new embed package - provides access to files embedded in the program during compilation - using the new //go:embed directive. -

- -

File Systems

- -

- The new io/fs package - defines the fs.FS interface, - an abstraction for read-only trees of files. - The standard library packages have been adapted to make use - of the interface as appropriate. -

- -

- On the producer side of the interface, - the new embed.FS type - implements fs.FS, as does - zip.Reader. - The new os.DirFS function - provides an implementation of fs.FS backed by a tree - of operating system files. -

- -

- On the consumer side, - the new http.FS - function converts an fs.FS to an - http.FileSystem. - Also, the html/template - and text/template - packages’ ParseFS - functions and methods read templates from an fs.FS. -

- -

- For testing code that implements fs.FS, - the new testing/fstest - package provides a TestFS - function that checks for and reports common mistakes. - It also provides a simple in-memory file system implementation, - MapFS, - which can be useful for testing code that accepts fs.FS - implementations. -

- -

Deprecation of io/ioutil

- -

- The io/ioutil package has - turned out to be a poorly defined and hard to understand collection - of things. All functionality provided by the package has been moved - to other packages. The io/ioutil package remains and - will continue to work as before, but we encourage new code to use - the new definitions in the io and - os packages. - - Here is a list of the new locations of the names exported - by io/ioutil: -

-

- - - -

Minor changes to the library

- -

- As always, there are various minor changes and updates to the library, - made with the Go 1 promise of compatibility - in mind. -

- -
archive/zip
-
-

- The new Reader.Open - method implements the fs.FS - interface. -

-
-
- -
crypto/dsa
-
-

- The crypto/dsa package is now deprecated. - See issue #40337. -

-
-
- -
crypto/hmac
-
-

- New will now panic if - separate calls to the hash generation function fail to return new values. - Previously, the behavior was undefined and invalid outputs were sometimes - generated. -

-
-
- -
crypto/tls
-
-

- I/O operations on closing or closed TLS connections can now be detected - using the new net.ErrClosed - error. A typical use would be errors.Is(err, net.ErrClosed). -

- -

- A default write deadline is now set in - Conn.Close - before sending the "close notify" alert, in order to prevent blocking - indefinitely. -

- -

- Clients now return a handshake error if the server selects - - an ALPN protocol that was not in - - the list advertised by the client. -

- -

- Servers will now prefer other available AEAD cipher suites (such as ChaCha20Poly1305) - over AES-GCM cipher suites if either the client or server doesn't have AES hardware - support, unless both - Config.PreferServerCipherSuites - and Config.CipherSuites - are set. The client is assumed not to have AES hardware support if it does - not signal a preference for AES-GCM cipher suites. -

- -

- Config.Clone now - returns nil if the receiver is nil, rather than panicking. -

-
-
- -
crypto/x509
-
-

- The GODEBUG=x509ignoreCN=0 flag will be removed in Go 1.17. - It enables the legacy behavior of treating the CommonName - field on X.509 certificates as a host name when no Subject Alternative - Names are present. -

- -

- ParseCertificate and - CreateCertificate - now enforce string encoding restrictions for the DNSNames, - EmailAddresses, and URIs fields. These fields - can only contain strings with characters within the ASCII range. -

- -

- CreateCertificate - now verifies the generated certificate's signature using the signer's - public key. If the signature is invalid, an error is returned, instead of - a malformed certificate. -

- -

- DSA signature verification is no longer supported. Note that DSA signature - generation was never supported. - See issue #40337. -

- -

- On Windows, Certificate.Verify - will now return all certificate chains that are built by the platform - certificate verifier, instead of just the highest ranked chain. -

- -

- The new SystemRootsError.Unwrap - method allows accessing the Err - field through the errors package functions. -

- -

- On Unix systems, the crypto/x509 package is now more - efficient in how it stores its copy of the system cert pool. - Programs that use only a small number of roots will use around a - half megabyte less memory. -

- -
-
- -
debug/elf
-
-

- More DT - and PT - constants have been added. -

-
-
- -
encoding/asn1
-
-

- Unmarshal and - UnmarshalWithParams - now return an error instead of panicking when the argument is not - a pointer or is nil. This change matches the behavior of other - encoding packages such as encoding/json. -

-
-
- -
encoding/json
-
-

- The json struct field tags understood by - Marshal, - Unmarshal, - and related functionality now permit semicolon characters within - a JSON object name for a Go struct field. -

-
-
- -
encoding/xml
-
-

- The encoder has always taken care to avoid using namespace prefixes - beginning with xml, which are reserved by the XML - specification. - Now, following the specification more closely, that check is - case-insensitive, so that prefixes beginning - with XML, XmL, and so on are also - avoided. -

-
-
- -
flag
-
-

- The new Func function - allows registering a flag implemented by calling a function, - as a lighter-weight alternative to implementing the - Value interface. -

-
-
- -
go/build
-
-

- The Package - struct has new fields that report information - about //go:embed directives in the package: - EmbedPatterns, - EmbedPatternPos, - TestEmbedPatterns, - TestEmbedPatternPos, - XTestEmbedPatterns, - XTestEmbedPatternPos. -

- -

- The Package field - IgnoredGoFiles - will no longer include files that start with "_" or ".", - as those files are always ignored. - IgnoredGoFiles is for files ignored because of - build constraints. -

- -

- The new Package - field IgnoredOtherFiles - has a list of non-Go files ignored because of build constraints. -

-
-
- -
go/build/constraint
-
-

- The new - go/build/constraint - package parses build constraint lines, both the original - // +build syntax and the //go:build - syntax that will be introduced in Go 1.17. - This package exists so that tools built with Go 1.16 will be able - to process Go 1.17 source code. - See https://golang.org/design/draft-gobuild - for details about the build constraint syntaxes and the planned - transition to the //go:build syntax. - Note that //go:build lines are not supported - in Go 1.16 and should not be introduced into Go programs yet. -

-
-
- -
html/template
-
-

- The new template.ParseFS - function and template.Template.ParseFS - method are like template.ParseGlob - and template.Template.ParseGlob, - but read the templates from an fs.FS. -

-
-
- -
io
-
-

- The package now defines a - ReadSeekCloser interface. -

- -

- The package now defines - Discard, - NopCloser, and - ReadAll, - to be used instead of the same names in the - io/ioutil package. -

-
-
- -
log
-
-

- The new Default function - provides access to the default Logger. -

-
-
- -
log/syslog
-
-

- The Writer - now uses the local message format - (omitting the host name and using a shorter time stamp) - when logging to custom Unix domain sockets, - matching the format already used for the default log socket. -

-
-
- -
mime/multipart
-
-

- The Reader's - ReadForm - method no longer rejects form data - when passed the maximum int64 value as a limit. -

-
-
- -
net
-
-

- The case of I/O on a closed network connection, or I/O on a network - connection that is closed before any of the I/O completes, can now - be detected using the new ErrClosed - error. A typical use would be errors.Is(err, net.ErrClosed). - In earlier releases the only way to reliably detect this case was to - match the string returned by the Error method - with "use of closed network connection". -

- -

- In previous Go releases the default TCP listener backlog size on Linux systems, - set by /proc/sys/net/core/somaxconn, was limited to a maximum of 65535. - On Linux kernel version 4.1 and above, the maximum is now 4294967295. -

- -

- On Linux, host name lookups no longer use DNS before checking - /etc/hosts when /etc/nsswitch.conf - is missing; this is common on musl-based systems and makes - Go programs match the behavior of C programs on those systems. -

-
-
- -
net/http
-
-

- In the net/http package, the - behavior of StripPrefix - has been changed to strip the prefix from the request URL's - RawPath field in addition to its Path field. - In past releases, only the Path field was trimmed, and so if the - request URL contained any escaped characters the URL would be modified to - have mismatched Path and RawPath fields. - In Go 1.16, StripPrefix trims both fields. - If there are escaped characters in the prefix part of the request URL the - handler serves a 404 instead of its previous behavior of invoking the - underlying handler with a mismatched Path/RawPath pair. -

- -

- The net/http package now rejects HTTP range requests - of the form "Range": "bytes=--N" where "-N" is a negative suffix length, for - example "Range": "bytes=--2". It now replies with a 416 "Range Not Satisfiable" response. -

- -

- Cookies set with SameSiteDefaultMode - now behave according to the current spec (no attribute is set) instead of - generating a SameSite key without a value. -

- -

- The Client now sends - an explicit Content-Length: 0 - header in PATCH requests with empty bodies, - matching the existing behavior of POST and PUT. -

- -

- The ProxyFromEnvironment - function no longer returns the setting of the HTTP_PROXY - environment variable for https:// URLs when - HTTPS_PROXY is unset. -

- -

- The Transport - type has a new field - GetProxyConnectHeader - which may be set to a function that returns headers to send to a - proxy during a CONNECT request. - In effect GetProxyConnectHeader is a dynamic - version of the existing field - ProxyConnectHeader; - if GetProxyConnectHeader is not nil, - then ProxyConnectHeader is ignored. -

- -

- The new http.FS - function converts an fs.FS - to an http.FileSystem. -

-
-
- -
net/http/httputil
-
-

- ReverseProxy - now flushes buffered data more aggressively when proxying - streamed responses with unknown body lengths. -

-
-
- -
net/smtp
-
-

- The Client's - Mail - method now sends the SMTPUTF8 directive to - servers that support it, signaling that addresses are encoded in UTF-8. -

-
-
- -
os
-
-

- Process.Signal now - returns ErrProcessDone - instead of the unexported errFinished when the process has - already finished. -

- -

- The package defines a new type - DirEntry - as an alias for fs.DirEntry. - The new ReadDir - function and the new - File.ReadDir - method can be used to read the contents of a directory into a - slice of DirEntry. - The File.Readdir - method (note the lower case d in dir) - still exists, returning a slice of - FileInfo, but for - most programs it will be more efficient to switch to - File.ReadDir. -

- -

- The package now defines - CreateTemp, - MkdirTemp, - ReadFile, and - WriteFile, - to be used instead of functions defined in the - io/ioutil package. -

- -

- The types FileInfo, - FileMode, and - PathError - are now aliases for types of the same name in the - io/fs package. - Function signatures in the os - package have been updated to refer to the names in the - io/fs package. - This should not affect any existing code. -

- -

- The new DirFS function - provides an implementation of - fs.FS backed by a tree - of operating system files. -

-
-
- -
os/signal
-
-

- The new - NotifyContext - function allows creating contexts that are canceled upon arrival of - specific signals. -

-
-
- -
path
-
-

- The Match function now - returns an error if the unmatched part of the pattern has a - syntax error. Previously, the function returned early on a failed - match, and thus did not report any later syntax error in the - pattern. -

-
-
- -
path/filepath
-
-

- The new function - WalkDir - is similar to - Walk, - but is typically more efficient. - The function passed to WalkDir receives a - fs.DirEntry - instead of a - fs.FileInfo. - (To clarify for those who recall the Walk function - as taking an os.FileInfo, - os.FileInfo is now an alias for fs.FileInfo.) -

- -

- The Match and - Glob functions now - return an error if the unmatched part of the pattern has a - syntax error. Previously, the functions returned early on a failed - match, and thus did not report any later syntax error in the - pattern. -

-
-
- -
runtime/debug
-
-

- The runtime.Error values - used when SetPanicOnFault is enabled may now have an - Addr method. If that method exists, it returns the memory - address that triggered the fault. -

-
-
- -
strconv
-
-

- ParseFloat now uses - the Eisel-Lemire - algorithm, improving performance by up to a factor of 2. This can - also speed up decoding textual formats like encoding/json. -

-
-
- -
syscall
-
-

- NewCallback - and - NewCallbackCDecl - now correctly support callback functions with multiple - sub-uintptr-sized arguments in a row. This may - require changing uses of these functions to eliminate manual - padding between small arguments. -

- -

- SysProcAttr on Windows has a new NoInheritHandles field that disables inheriting handles when creating a new process. -

- -

- DLLError on Windows now has an Unwrap method for unwrapping its underlying error. -

- -

- On Linux, - Setgid, - Setuid, - and related calls are now implemented. - Previously, they returned an syscall.EOPNOTSUPP error. -

- -

- On Linux, the new functions - AllThreadsSyscall - and AllThreadsSyscall6 - may be used to make a system call on all Go threads in the process. - These functions may only be used by programs that do not use cgo; - if a program uses cgo, they will always return - syscall.ENOTSUP. -

-
-
- -
testing/iotest
-
-

- The new - ErrReader - function returns an - io.Reader that always - returns an error. -

- -

- The new - TestReader - function tests that an io.Reader - behaves correctly. -

-
-
- -
text/template
-
-

- Newlines characters are now allowed inside action delimiters, - permitting actions to span multiple lines. -

- -

- The new template.ParseFS - function and template.Template.ParseFS - method are like template.ParseGlob - and template.Template.ParseGlob, - but read the templates from an fs.FS. -

-
-
- -
text/template/parse
-
-

- A new CommentNode - was added to the parse tree. The Mode - field in the parse.Tree enables access to it. -

-
-
- -
time/tzdata
-
-

- The slim timezone data format is now used for the timezone database in - $GOROOT/lib/time/zoneinfo.zip and the embedded copy in this - package. This reduces the size of the timezone database by about 350 KB. -

-
-
- -
unicode
-
-

- The unicode package and associated - support throughout the system has been upgraded from Unicode 12.0.0 to - Unicode 13.0.0, - which adds 5,930 new characters, including four new scripts, and 55 new emoji. - Unicode 13.0.0 also designates plane 3 (U+30000-U+3FFFF) as the tertiary - ideographic plane. -

-
-
diff --git a/doc/go1.17.html b/doc/go1.17.html new file mode 100644 index 00000000000..cf856a1e735 --- /dev/null +++ b/doc/go1.17.html @@ -0,0 +1,535 @@ + + + + + + +

DRAFT RELEASE NOTES — Introduction to Go 1.17

+ +

+ + Go 1.17 is not yet released. These are work-in-progress + release notes. Go 1.17 is expected to be released in August 2021. + +

+ +

Changes to the language

+ +

+ TODO: https://golang.org/cl/216424: allow conversion from slice to array ptr +

+ +

+ TODO: https://golang.org/cl/312212: add unsafe.Add and unsafe.Slice +

+ +

Ports

+ +

Darwin

+ +

+ As announced in the Go 1.16 release + notes, Go 1.17 requires macOS 10.13 High Sierra or later; support + for previous versions has been discontinued. +

+ +

+ TODO: complete the Ports section +

+ +

Tools

+ +

+ TODO: complete the Tools section +

+ +

Go command

+ +

Lazy module loading

+ +

+ If a module specifies go 1.17 or higher in its + go.mod file, its transitive requirements are now loaded lazily, + avoiding the need to download or read go.mod files for + otherwise-irrelevant dependencies. To support lazy loading, in Go 1.17 modules + the go command maintains explicit requirements in + the go.mod file for every dependency that provides any package + transitively imported by any package or test within the module. + See the design + document for more detail. +

+ +

To facilitate the upgrade to lazy loading, + the go mod tidy subcommand now supports + a -go flag to set or change the go version in + the go.mod file. To enable lazy loading for an existing module + without changing the selected versions of its dependencies, run: +

+ +
+  go mod tidy -go=1.17
+
+ +

Module deprecation comments

+ +

+ Module authors may deprecate a module by adding a + // Deprecated: + comment to go.mod, then tagging a new version. + go get now prints a warning if a module needed to + build packages named on the command line is deprecated. go + list -m -u prints deprecations for all + dependencies (use -f or -json to show the full + message). The go command considers different major versions to + be distinct modules, so this mechanism may be used, for example, to provide + users with migration instructions for a new major version. +
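+ 
+ For example, a go.mod file marking a hypothetical module as deprecated
+ might begin:
+ 
+// Deprecated: use example.com/mod/v2 instead.
+module example.com/mod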

+ +

go get

+ +

+ The go get -insecure flag was + deprecated in Go 1.16 and has now been removed. To permit the use of insecure schemes + when fetching dependencies, please use the GOINSECURE + environment variable. The -insecure flag also bypassed module + sum validation; use GOPRIVATE or GONOSUMDB if + you need that functionality. See go help + environment for details. +

+ +

go.mod files missing go directives

+ +

+ If the main module's go.mod file does not contain + a go directive and + the go command cannot update the go.mod file, the + go command now assumes go 1.11 instead of the + current release. (go mod init has added + go directives automatically since + Go 1.12.) +

+ +

+ If a module dependency lacks an explicit go.mod file, or + its go.mod file does not contain + a go directive, + the go command now assumes go 1.16 for that + dependency instead of the current release. (Dependencies developed in GOPATH + mode may lack a go.mod file, and + the vendor/modules.txt file has to date never recorded + the go versions indicated by dependencies' go.mod + files.) +

+ +

vendor contents

+ +

+ If the main module specifies go 1.17 or higher, + go mod vendor now annotates + vendor/modules.txt with the go version indicated by + each vendored module in its own go.mod file. The annotated + version is used when building the module's packages from vendored source code. +

+ +

+ If the main module specifies go 1.17 or higher, + go mod vendor now omits go.mod + and go.sum files for vendored dependencies, which can otherwise + interfere with the ability of the go command to identify the correct + module root when invoked within the vendor tree. +

+ +

Password prompts

+ +

+ The go command by default now suppresses SSH password prompts and + Git Credential Manager prompts when fetching Git repositories using SSH, as it + already did previously for other Git password prompts. Users authenticating to + private Git repos with password-protected SSH may configure + an ssh-agent to enable the go command to use + password-protected SSH keys. +

+ +

+ TODO: https://golang.org/cl/249759: cmd/cover: replace code using optimized golang.org/x/tools/cover +

+ +

Vet

+ +

+ TODO: https://golang.org/cl/299532: cmd/vet: bring in sigchanyzer to report unbuffered channels to signal.Notify +

+ +

+ TODO: complete the Vet section +

+ +

Runtime

+ +

+ TODO: https://golang.org/cl/304470: cmd/compile, runtime: add metadata for argument printing in traceback +

+ +

+ TODO: complete the Runtime section +

+ +

Compiler

+ +

+ TODO: complete the Compiler section, or delete if not needed +

+ +

Linker

+ +

+ TODO: complete the Linker section, or delete if not needed +

+ +

Core library

+ +

+ TODO: complete the Core library section +

+ +

crypto/tls

+ +

+ (*Conn).HandshakeContext was added to + allow the user to control cancellation of an in-progress TLS handshake. + The context provided is propagated into the + ClientHelloInfo + and CertificateRequestInfo + structs, and is accessible through the new + (*ClientHelloInfo).Context + and + (*CertificateRequestInfo).Context + methods, respectively. Canceling the context after the handshake has finished + has no effect. +
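+ 
+ A minimal sketch of bounding a client handshake with a timeout
+ (tcpConn and cfg are assumed to be an existing net.Conn
+ and *tls.Config):
+ 
+ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+defer cancel()
+conn := tls.Client(tcpConn, cfg)
+if err := conn.HandshakeContext(ctx); err != nil {
+    // The handshake was canceled, timed out, or otherwise failed.
+    log.Fatal(err)
+}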

+ +

+ When Config.NextProtos is set, servers now + enforce that there is an overlap between the configured protocols and the protocols + advertised by the client, if any. If there is no overlap, the connection is closed + with the no_application_protocol alert, as required by RFC 7301. +
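+ 
+ A sketch of a server Config affected by this change (cert is an
+ assumed tls.Certificate); a client that advertises only ALPN protocols
+ outside this list is now rejected:
+ 
+cfg := &tls.Config{
+    Certificates: []tls.Certificate{cert},
+    NextProtos:   []string{"h2", "http/1.1"},
+}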

+ +

Cgo

+ +

+ The runtime/cgo package now provides a + new facility that allows turning any Go value into a safe representation + that can be used to pass values between C and Go. See + runtime/cgo.Handle for more information. +
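+ 
+ A minimal sketch of round-tripping a Go value through the new API:
+ 
+h := cgo.NewHandle("hello") // an integer value that is safe to pass to C
+// ... pass uintptr(h) through C code and back ...
+v := h.Value().(string) // recover the original Go value
+h.Delete()              // invalidate the handle once it is no longer needed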

+ +

Minor changes to the library

+ +

+ As always, there are various minor changes and updates to the library, + made with the Go 1 promise of compatibility + in mind. +

+ +
archive/zip
+
+

+ TODO: https://golang.org/cl/312310: add File.OpenRaw, Writer.CreateRaw, Writer.Copy +

+
+
+ +
bufio
+
+

+ The Writer.WriteRune method + now writes the replacement character U+FFFD for negative rune values, + as it does for other invalid runes. +
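+ 
+ For example (w is assumed to be a *bufio.Writer):
+ 
+n, _ := w.WriteRune(-1) // writes U+FFFD; n == 3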

+
+
+ +
bytes
+
+

+ The Buffer.WriteRune method + now writes the replacement character U+FFFD for negative rune values, + as it does for other invalid runes. +

+
+
+ +
compress/lzw
+
+

+ The new + Reader.Reset + and + Writer.Reset + methods allow reuse of a Reader or Writer. +
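+ 
+ A sketch of reuse, assuming a zero-value Reader may be Reset
+ (src1 and src2 are io.Readers):
+ 
+var r lzw.Reader
+r.Reset(src1, lzw.LSB, 8)
+// ... read the first stream ...
+r.Reset(src2, lzw.LSB, 8) // reuse the same Reader for a second stream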

+
+
+ +
crypto/rsa
+
+

+ TODO: https://golang.org/cl/302230: fix salt length calculation with PSSSaltLengthAuto +

+
+
+ +
database/sql
+
+

+ TODO: https://golang.org/cl/258360: close driver.Connector if it implements io.Closer +

+ +

+ TODO: https://golang.org/cl/311572: add NullInt16 and NullByte +

+
+
+ +
encoding/binary
+
+

+ binary.Uvarint will stop reading after 10 bytes to avoid + wasted computations. If more than 10 bytes are needed, the byte count returned is -11. +
+ Previous Go versions could return larger negative counts when reading incorrectly encoded varints. +
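+ 
+ For example (a sketch):
+ 
+b := bytes.Repeat([]byte{0x80}, 16) // an over-long varint
+v, n := binary.Uvarint(b)           // v == 0, n == -11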

+
+
+ +
flag
+
+

+ TODO: https://golang.org/cl/271788: panic if flag name begins with - or contains = +

+
+
+ +
io/fs
+
+

+ TODO: https://golang.org/cl/293649: implement FileInfoToDirEntry +

+
+
+ +
math
+
+

+ TODO: https://golang.org/cl/247058: add MaxUint, MinInt, MaxInt +

+
+
+ +
mime
+
+

+ TODO: https://golang.org/cl/305230: support reading shared mime-info database on unix systems +

+
+
+ +
net
+
+

+ TODO: https://golang.org/cl/272668: add IP.IsPrivate +

+ +

+ TODO: https://golang.org/cl/301709: make go resolver aware of network parameter +

+ +

+ TODO: https://golang.org/cl/307030: make ErrClosed and ParseError implement net.Error +

+
+
+ +
net/http
+
+

+ The net/http package now uses the new + (*tls.Conn).HandshakeContext + with the Request context + when performing TLS handshakes in the client or server. +

+ +

+ TODO: https://golang.org/cl/235437: add to deadlines only when positive +

+ +

+ TODO: https://golang.org/cl/308952: make ReadRequest return an error when requests have multiple Host headers +

+
+
+ +
net/http/httptest
+
+

+ TODO: https://golang.org/cl/308950: panic on non-3 digit (XXX) status code in Recorder.WriteHeader +

+
+
+ +
net/url
+
+

+ TODO: https://golang.org/cl/314850: add Values.Has +

+
+
+ +
os
+
+

+ TODO: https://golang.org/cl/268020: avoid allocation in File.WriteString +

+
+
+ +
reflect
+
+

+ The new + StructField.IsExported + and + Method.IsExported + methods report whether a struct field or type method is exported. + They provide a more readable alternative to checking whether PkgPath + is empty. +
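+ 
+ For example (a sketch):
+ 
+t := reflect.TypeOf(struct {
+    Name string // exported
+    age  int    // unexported
+}{})
+fmt.Println(t.Field(0).IsExported()) // true
+fmt.Println(t.Field(1).IsExported()) // false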

+ +

+ TODO: https://golang.org/cl/281233: add VisibleFields function +

+ +

+ TODO: https://golang.org/cl/284136: panic if ArrayOf is called with negative length +

+
+
+ +
strconv
+
+

+ TODO: https://golang.org/cl/170079: implement Ryū-like algorithm for fixed precision ftoa +

+ +

+ TODO: https://golang.org/cl/170080: Implement Ryū algorithm for ftoa shortest mode +

+ +

+ The new QuotedPrefix function + returns the quoted string (as understood by + Unquote) + at the start of input. +
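+ 
+ For example (a sketch):
+ 
+s, err := strconv.QuotedPrefix(`"hello" world`) // s == `"hello"`, err == nil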

+
+
+ +
strings
+
+

+ The Builder.WriteRune method + now writes the replacement character U+FFFD for negative rune values, + as it does for other invalid runes. +

+
+
+ +
sync/atomic
+
+

+ TODO: https://golang.org/cl/241678: add (*Value).Swap and (*Value).CompareAndSwap +

+
+
+ +
syscall
+
+

+ TODO: https://golang.org/cl/295371: do not overflow key memory in GetQueuedCompletionStatus +

+ +

+ TODO: https://golang.org/cl/313653: restore signal mask after setting foreground process group +

+
+
+ +
testing
+
+

+ TODO: https://golang.org/cl/310033: add -shuffle=off|on|N to alter the execution order of tests and benchmarks +

+
+
+ +
text/template/parse
+
+

+ TODO: https://golang.org/cl/301493: add a mode to skip func-check on parsing +

+
+
+ +
time
+
+

+ time.Time now has a GoString + method that returns a more useful value when a Time is printed with + the "%#v" format specifier in the fmt package. +
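+ 
+ For example (a sketch):
+ 
+t := time.Date(2021, time.August, 16, 0, 0, 0, 0, time.UTC)
+fmt.Printf("%#v\n", t) // time.Date(2021, time.August, 16, 0, 0, 0, 0, time.UTC)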

+ +

+ TODO: https://golang.org/cl/264077: add Time.IsDST() to check if its Location is in Daylight Savings Time +

+ +

+ TODO: https://golang.org/cl/293349: add Time.Unix{Milli,Micro} and to-Time helpers UnixMicro, UnixMilli +

+ +

+ TODO: https://golang.org/cl/300996: support "," as separator for fractional seconds +

+
+
+ +
unicode
+
+

+ The Is, + IsGraphic, + IsLetter, + IsLower, + IsMark, + IsNumber, + IsPrint, + IsPunct, + IsSpace, + IsSymbol, and + IsUpper functions + now return false on negative rune values, as they do for other invalid runes. +

+
+
diff --git a/doc/go_spec.html b/doc/go_spec.html index 59c9ce3c434..e59b3554f28 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ @@ -830,7 +830,7 @@ The underlying type of []B1, B3, and B4 i

Method sets

-A type may have a method set associated with it. +A type has a (possibly empty) method set associated with it. The method set of an interface type is its interface. The method set of any other type T consists of all methods declared with receiver type T. @@ -3532,9 +3532,9 @@ within Greeting, who will have the value

-If the final argument is assignable to a slice type []T, it is -passed unchanged as the value for a ...T parameter if the argument -is followed by .... In this case no new slice is created. +If the final argument is assignable to a slice type []T and +is followed by ..., it is passed unchanged as the value +for a ...T parameter. In this case no new slice is created.

@@ -3681,8 +3681,8 @@ The bitwise logical and shift operators apply to integers only. ^ bitwise XOR integers &^ bit clear (AND NOT) integers -<< left shift integer << unsigned integer ->> right shift integer >> unsigned integer +<< left shift integer << integer >= 0 +>> right shift integer >> integer >= 0 @@ -4164,6 +4164,10 @@ in any of these cases:

  • x is a string and T is a slice of bytes or runes.
  • +
  • + x is a slice, T is a pointer to an array, + and the slice and array types have identical element types. +
  • @@ -4314,6 +4318,24 @@ MyRunes("白鵬翔") // []rune{0x767d, 0x9d6c, 0x7fd4} +

    Conversions from slice to array pointer

    + +

    +Converting a slice to an array pointer yields a pointer to the underlying array of the slice. +If the length of the slice is less than the length of the array, +a run-time panic occurs. +

    + +
    +s := make([]byte, 2, 4)
    +s0 := (*[0]byte)(s)      // s0 != nil
    +s2 := (*[2]byte)(s)      // &s2[0] == &s[0]
    +s4 := (*[4]byte)(s)      // panics: len([4]byte) > len(s)
    +
    +var t []string
    +t0 := (*[0]string)(t)    // t0 == nil
+t1 := (*[1]string)(t)    // panics: len([1]string) > len(t)
    +

    Constant expressions

    @@ -4931,9 +4953,9 @@ ExprSwitchCase = "case" ExpressionList | "default" .

    If the switch expression evaluates to an untyped constant, it is first implicitly -converted to its default type; -if it is an untyped boolean value, it is first implicitly converted to type bool. +converted to its default type. The predeclared untyped value nil cannot be used as a switch expression. +The switch expression type must be comparable.

    @@ -6689,6 +6711,10 @@ type Pointer *ArbitraryType func Alignof(variable ArbitraryType) uintptr func Offsetof(selector ArbitraryType) uintptr func Sizeof(variable ArbitraryType) uintptr + +type IntegerType int // shorthand for an integer type; it is not a real type +func Add(ptr Pointer, len IntegerType) Pointer +func Slice(ptr *ArbitraryType, len IntegerType) []ArbitraryType

    @@ -6745,6 +6771,32 @@ Calls to Alignof, Offsetof, and Sizeof are compile-time constant expressions of type uintptr.

    +

    +The function Add adds len to ptr +and returns the updated pointer unsafe.Pointer(uintptr(ptr) + uintptr(len)). +The len argument must be of integer type or an untyped constant. +A constant len argument must be representable by a value of type int; +if it is an untyped constant it is given type int. +The rules for valid uses of Pointer still apply. +

    + +

    +The function Slice returns a slice whose underlying array starts at ptr +and whose length and capacity are len: +

    + +
    +(*[len]ArbitraryType)(unsafe.Pointer(ptr))[:]
    +
    + +

    +The len argument must be of integer type or an untyped constant. +A constant len argument must be non-negative and representable by a value of type int; +if it is an untyped constant it is given type int. +If ptr is nil or len is negative at run time, +a run-time panic occurs. +
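+
+For example (a sketch; b is a local byte array):
+
+var b [8]byte
+p := unsafe.Add(unsafe.Pointer(&b[0]), 2) // points at b[2]
+s := unsafe.Slice(&b[0], len(b))          // []byte with length and capacity 8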

    +

    Size and alignment guarantees

    diff --git a/favicon.ico b/favicon.ico deleted file mode 100644 index 8d225846dbc..00000000000 Binary files a/favicon.ico and /dev/null differ diff --git a/misc/android/go_android_exec.go b/misc/android/go_android_exec.go index 7aa7fe56fc5..3af2bee5839 100644 --- a/misc/android/go_android_exec.go +++ b/misc/android/go_android_exec.go @@ -14,7 +14,6 @@ import ( "fmt" "go/build" "io" - "io/ioutil" "log" "os" "os/exec" @@ -276,7 +275,7 @@ func adbCopyGoroot() error { if err := syscall.Flock(int(stat.Fd()), syscall.LOCK_EX); err != nil { return err } - s, err := ioutil.ReadAll(stat) + s, err := io.ReadAll(stat) if err != nil { return err } @@ -294,7 +293,7 @@ func adbCopyGoroot() error { goroot := runtime.GOROOT() // Build go for android. goCmd := filepath.Join(goroot, "bin", "go") - tmpGo, err := ioutil.TempFile("", "go_android_exec-cmd-go-*") + tmpGo, err := os.CreateTemp("", "go_android_exec-cmd-go-*") if err != nil { return err } diff --git a/misc/cgo/errors/argposition_test.go b/misc/cgo/errors/argposition_test.go new file mode 100644 index 00000000000..331095f7473 --- /dev/null +++ b/misc/cgo/errors/argposition_test.go @@ -0,0 +1,134 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 42580: cmd/cgo: shifting identifier position in ast + +package errorstest + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" +) + +type ShortPosition struct { + Line int + Column int + Visited bool +} + +type IdentPositionInfo map[string][]ShortPosition + +type Visitor struct { + identPosInfo IdentPositionInfo + fset *token.FileSet + t *testing.T +} + +func (v *Visitor) Visit(node ast.Node) ast.Visitor { + if ident, ok := node.(*ast.Ident); ok { + if expectedPositions, ok := v.identPosInfo[ident.Name]; ok { + gotMatch := false + var errorMessage strings.Builder + for caseIndex, expectedPos := range expectedPositions { + actualPosition := v.fset.PositionFor(ident.Pos(), true) + errorOccured := false + if expectedPos.Line != actualPosition.Line { + fmt.Fprintf(&errorMessage, "wrong line number for ident %s: expected: %d got: %d\n", ident.Name, expectedPos.Line, actualPosition.Line) + errorOccured = true + } + if expectedPos.Column != actualPosition.Column { + fmt.Fprintf(&errorMessage, "wrong column number for ident %s: expected: %d got: %d\n", ident.Name, expectedPos.Column, actualPosition.Column) + errorOccured = true + } + if errorOccured { + continue + } + gotMatch = true + expectedPositions[caseIndex].Visited = true + } + + if !gotMatch { + v.t.Errorf(errorMessage.String()) + } + } + } + return v +} + +func TestArgumentsPositions(t *testing.T) { + testdata, err := filepath.Abs("testdata") + if err != nil { + t.Fatal(err) + } + + tmpPath := t.TempDir() + + dir := filepath.Join(tmpPath, "src", "testpositions") + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatal(err) + } + + cmd := exec.Command("go", "tool", "cgo", + "-srcdir", testdata, + "-objdir", dir, + "issue42580.go") + cmd.Stderr = new(bytes.Buffer) + + err = cmd.Run() + if err != nil { + t.Fatalf("%s: %v\n%s", cmd, err, cmd.Stderr) + } + mainProcessed, err := ioutil.ReadFile(filepath.Join(dir, "issue42580.cgo1.go")) + if err != nil { + t.Fatal(err) + } + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "", mainProcessed, parser.AllErrors) + if err != nil { + fmt.Println(err) + return + } + + expectation := 
IdentPositionInfo{ + "checkedPointer": []ShortPosition{ + ShortPosition{ + Line: 32, + Column: 56, + }, + }, + "singleInnerPointerChecked": []ShortPosition{ + ShortPosition{ + Line: 37, + Column: 91, + }, + }, + "doublePointerChecked": []ShortPosition{ + ShortPosition{ + Line: 42, + Column: 91, + }, + }, + } + for _, decl := range f.Decls { + if fdecl, ok := decl.(*ast.FuncDecl); ok { + ast.Walk(&Visitor{expectation, fset, t}, fdecl.Body) + } + } + for ident, positions := range expectation { + for _, position := range positions { + if !position.Visited { + t.Errorf("Position %d:%d missed for %s ident", position.Line, position.Column, ident) + } + } + } +} diff --git a/misc/cgo/errors/badsym_test.go b/misc/cgo/errors/badsym_test.go index b2701bf922e..fc687567bf7 100644 --- a/misc/cgo/errors/badsym_test.go +++ b/misc/cgo/errors/badsym_test.go @@ -6,7 +6,6 @@ package errorstest import ( "bytes" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -55,7 +54,7 @@ func TestBadSymbol(t *testing.T) { makeFile := func(mdir, base, source string) string { ret := filepath.Join(mdir, base) - if err := ioutil.WriteFile(ret, []byte(source), 0644); err != nil { + if err := os.WriteFile(ret, []byte(source), 0644); err != nil { t.Fatal(err) } return ret @@ -100,7 +99,7 @@ func TestBadSymbol(t *testing.T) { // _cgo_import.go. rewrite := func(from, to string) { - obj, err := ioutil.ReadFile(from) + obj, err := os.ReadFile(from) if err != nil { t.Fatal(err) } @@ -115,7 +114,7 @@ func TestBadSymbol(t *testing.T) { obj = bytes.ReplaceAll(obj, []byte(magicInput), []byte(magicReplace)) - if err := ioutil.WriteFile(to, obj, 0644); err != nil { + if err := os.WriteFile(to, obj, 0644); err != nil { t.Fatal(err) } } diff --git a/misc/cgo/errors/errors_test.go b/misc/cgo/errors/errors_test.go index 1bdf843451d..a077b594786 100644 --- a/misc/cgo/errors/errors_test.go +++ b/misc/cgo/errors/errors_test.go @@ -7,7 +7,6 @@ package errorstest import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -25,7 +24,7 @@ func check(t *testing.T, file string) { t.Run(file, func(t *testing.T) { t.Parallel() - contents, err := ioutil.ReadFile(path(file)) + contents, err := os.ReadFile(path(file)) if err != nil { t.Fatal(err) } @@ -56,7 +55,7 @@ func check(t *testing.T, file string) { } func expect(t *testing.T, file string, errors []*regexp.Regexp) { - dir, err := ioutil.TempDir("", filepath.Base(t.Name())) + dir, err := os.MkdirTemp("", filepath.Base(t.Name())) if err != nil { t.Fatal(err) } diff --git a/misc/cgo/errors/ptr_test.go b/misc/cgo/errors/ptr_test.go index 4a46b6023bb..0f39dc8e547 100644 --- a/misc/cgo/errors/ptr_test.go +++ b/misc/cgo/errors/ptr_test.go @@ -10,7 +10,6 @@ import ( "bytes" "flag" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -463,7 +462,7 @@ func buildPtrTests(t *testing.T) (dir, exe string) { gopath = *tmp dir = "" } else { - d, err := ioutil.TempDir("", filepath.Base(t.Name())) + d, err := os.MkdirTemp("", filepath.Base(t.Name())) if err != nil { t.Fatal(err) } @@ -475,7 +474,7 @@ func buildPtrTests(t *testing.T) (dir, exe string) { if err := os.MkdirAll(src, 0777); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(src, "go.mod"), []byte("module ptrtest"), 0666); err != nil { + if err := os.WriteFile(filepath.Join(src, "go.mod"), []byte("module ptrtest"), 0666); err != nil { t.Fatal(err) } @@ -535,10 +534,10 @@ func buildPtrTests(t *testing.T) (dir, exe string) { fmt.Fprintf(&cgo1, "}\n\n") fmt.Fprintf(&cgo1, "%s\n", ptrTestMain) - if err := 
ioutil.WriteFile(filepath.Join(src, "cgo1.go"), cgo1.Bytes(), 0666); err != nil { + if err := os.WriteFile(filepath.Join(src, "cgo1.go"), cgo1.Bytes(), 0666); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(src, "cgo2.go"), cgo2.Bytes(), 0666); err != nil { + if err := os.WriteFile(filepath.Join(src, "cgo2.go"), cgo2.Bytes(), 0666); err != nil { t.Fatal(err) } diff --git a/misc/cgo/errors/testdata/issue42580.go b/misc/cgo/errors/testdata/issue42580.go new file mode 100644 index 00000000000..aba80dfebad --- /dev/null +++ b/misc/cgo/errors/testdata/issue42580.go @@ -0,0 +1,44 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 42580: cmd/cgo: shifting identifier position in ast + +package cgotest + +// typedef int (*intFunc) (); +// +// char* strarg = ""; +// +// int func_with_char(char* arg, void* dummy) +// {return 5;} +// +// int* get_arr(char* arg, void* dummy) +// {return NULL;} +import "C" +import "unsafe" + +// Test variables +var ( + checkedPointer = []byte{1} + doublePointerChecked = []byte{1} + singleInnerPointerChecked = []byte{1} +) + +// This test checks the positions of variable identifiers. +// Changing the positions of the test variables idents after this point will break the test. + +func TestSingleArgumentCast() C.int { + retcode := C.func_with_char((*C.char)(unsafe.Pointer(&checkedPointer[0])), unsafe.Pointer(C.strarg)) + return retcode +} + +func TestSingleArgumentCastRecFuncAsSimpleArg() C.int { + retcode := C.func_with_char((*C.char)(unsafe.Pointer(C.get_arr((*C.char)(unsafe.Pointer(&singleInnerPointerChecked[0])), unsafe.Pointer(C.strarg)))), nil) + return retcode +} + +func TestSingleArgumentCastRecFunc() C.int { + retcode := C.func_with_char((*C.char)(unsafe.Pointer(C.get_arr((*C.char)(unsafe.Pointer(&doublePointerChecked[0])), unsafe.Pointer(C.strarg)))), unsafe.Pointer(C.strarg)) + return retcode +} diff --git a/misc/cgo/life/life_test.go b/misc/cgo/life/life_test.go index 3c95d87d8ad..0becb262b43 100644 --- a/misc/cgo/life/life_test.go +++ b/misc/cgo/life/life_test.go @@ -6,7 +6,6 @@ package life_test import ( "bytes" - "io/ioutil" "log" "os" "os/exec" @@ -21,7 +20,7 @@ func TestMain(m *testing.M) { } func testMain(m *testing.M) int { - GOPATH, err := ioutil.TempDir("", "cgolife") + GOPATH, err := os.MkdirTemp("", "cgolife") if err != nil { log.Panic(err) } @@ -38,7 +37,7 @@ func testMain(m *testing.M) int { log.Panic(err) } os.Setenv("PWD", modRoot) - if err := ioutil.WriteFile("go.mod", []byte("module cgolife\n"), 0666); err != nil { + if err := os.WriteFile("go.mod", []byte("module cgolife\n"), 0666); err != nil { log.Panic(err) } diff --git a/misc/cgo/stdio/stdio_test.go b/misc/cgo/stdio/stdio_test.go index ab5d328f676..675418f98d0 100644 --- a/misc/cgo/stdio/stdio_test.go +++ b/misc/cgo/stdio/stdio_test.go @@ -6,7 +6,6 @@ package stdio_test import ( "bytes" - "io/ioutil" "log" "os" "os/exec" @@ -21,7 +20,7 @@ func TestMain(m *testing.M) { } func testMain(m *testing.M) int { - GOPATH, err := ioutil.TempDir("", "cgostdio") + GOPATH, err := os.MkdirTemp("", "cgostdio") if err != nil { log.Panic(err) } @@ -38,7 +37,7 @@ func testMain(m *testing.M) int { log.Panic(err) } os.Setenv("PWD", modRoot) - if err := ioutil.WriteFile("go.mod", []byte("module cgostdio\n"), 0666); err != nil { + if err := os.WriteFile("go.mod", []byte("module cgostdio\n"), 0666); err != nil { log.Panic(err) } diff --git 
a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go index f7a76d047bd..143f23f0e0c 100644 --- a/misc/cgo/test/cgo_test.go +++ b/misc/cgo/test/cgo_test.go @@ -59,6 +59,7 @@ func Test28896(t *testing.T) { test28896(t) } func Test30065(t *testing.T) { test30065(t) } func Test32579(t *testing.T) { test32579(t) } func Test31891(t *testing.T) { test31891(t) } +func Test45451(t *testing.T) { test45451(t) } func TestAlign(t *testing.T) { testAlign(t) } func TestAtol(t *testing.T) { testAtol(t) } func TestBlocking(t *testing.T) { testBlocking(t) } @@ -80,6 +81,7 @@ func TestNamedEnum(t *testing.T) { testNamedEnum(t) } func TestCastToEnum(t *testing.T) { testCastToEnum(t) } func TestErrno(t *testing.T) { testErrno(t) } func TestFpVar(t *testing.T) { testFpVar(t) } +func TestHandle(t *testing.T) { testHandle(t) } func TestHelpers(t *testing.T) { testHelpers(t) } func TestLibgcc(t *testing.T) { testLibgcc(t) } func TestMultipleAssign(t *testing.T) { testMultipleAssign(t) } diff --git a/misc/cgo/test/issue1435.go b/misc/cgo/test/issue1435.go index a1c7cacde73..cf34ce8db6c 100644 --- a/misc/cgo/test/issue1435.go +++ b/misc/cgo/test/issue1435.go @@ -8,7 +8,7 @@ package cgotest import ( "fmt" - "io/ioutil" + "os" "strings" "syscall" "testing" @@ -64,7 +64,7 @@ import "C" func compareStatus(filter, expect string) error { expected := filter + expect pid := syscall.Getpid() - fs, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/task", pid)) + fs, err := os.ReadDir(fmt.Sprintf("/proc/%d/task", pid)) if err != nil { return fmt.Errorf("unable to find %d tasks: %v", pid, err) } @@ -72,7 +72,7 @@ func compareStatus(filter, expect string) error { foundAThread := false for _, f := range fs { tf := fmt.Sprintf("/proc/%s/status", f.Name()) - d, err := ioutil.ReadFile(tf) + d, err := os.ReadFile(tf) if err != nil { // There are a surprising number of ways this // can error out on linux. We've seen all of diff --git a/misc/cgo/test/issue6997_linux.go b/misc/cgo/test/issue6997_linux.go index 0c98ea0794e..f19afb8b7ad 100644 --- a/misc/cgo/test/issue6997_linux.go +++ b/misc/cgo/test/issue6997_linux.go @@ -5,7 +5,7 @@ // +build !android // Test that pthread_cancel works as expected -// (NPTL uses SIGRTMIN to implement thread cancelation) +// (NPTL uses SIGRTMIN to implement thread cancellation) // See https://golang.org/issue/6997 package cgotest @@ -17,8 +17,10 @@ extern int CancelThread(); */ import "C" -import "testing" -import "time" +import ( + "testing" + "time" +) func test6997(t *testing.T) { r := C.StartThread() diff --git a/misc/cgo/test/issue8148.c b/misc/cgo/test/issue8148.c new file mode 100644 index 00000000000..927b4346cbe --- /dev/null +++ b/misc/cgo/test/issue8148.c @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "_cgo_export.h" + +int get8148(void) { + T t; + t.i = 42; + return issue8148Callback(&t); +} diff --git a/misc/cgo/test/issue8148.go b/misc/cgo/test/issue8148.go index f704788aef8..aee9003d507 100644 --- a/misc/cgo/test/issue8148.go +++ b/misc/cgo/test/issue8148.go @@ -10,14 +10,7 @@ package cgotest /* typedef struct { int i; } T; - -int issue8148Callback(T*); - -static int get() { - T t; - t.i = 42; - return issue8148Callback(&t); -} +int get8148(void); */ import "C" @@ -27,5 +20,5 @@ func issue8148Callback(t *C.T) C.int { } func Issue8148() int { - return int(C.get()) + return int(C.get8148()) } diff --git a/misc/cgo/test/pkg_test.go b/misc/cgo/test/pkg_test.go index 94abaa03e8d..14013a4cd96 100644 --- a/misc/cgo/test/pkg_test.go +++ b/misc/cgo/test/pkg_test.go @@ -5,7 +5,6 @@ package cgotest import ( - "io/ioutil" "os" "os/exec" "path/filepath" @@ -37,7 +36,7 @@ func TestCrossPackageTests(t *testing.T) { } } - GOPATH, err := ioutil.TempDir("", "cgotest") + GOPATH, err := os.MkdirTemp("", "cgotest") if err != nil { t.Fatal(err) } @@ -47,7 +46,7 @@ func TestCrossPackageTests(t *testing.T) { if err := overlayDir(modRoot, "testdata"); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module cgotest\n"), 0666); err != nil { + if err := os.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module cgotest\n"), 0666); err != nil { t.Fatal(err) } diff --git a/misc/cgo/test/setgid_linux.go b/misc/cgo/test/setgid_linux.go index 6773f94d3d6..7c64946cb34 100644 --- a/misc/cgo/test/setgid_linux.go +++ b/misc/cgo/test/setgid_linux.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Test that setgid does not hang on GNU/Linux. +// Test that setgid does not hang on Linux. // See https://golang.org/issue/3871 for details. package cgotest diff --git a/misc/cgo/test/test.go b/misc/cgo/test/test.go index 65823b1ca0e..3b8f548b13d 100644 --- a/misc/cgo/test/test.go +++ b/misc/cgo/test/test.go @@ -899,6 +899,10 @@ static uint16_t issue31093F(uint16_t v) { return v; } // issue 32579 typedef struct S32579 { unsigned char data[1]; } S32579; +// issue 37033, cgo.Handle +extern void GoFunc37033(uintptr_t handle); +void cFunc37033(uintptr_t handle) { GoFunc37033(handle); } + // issue 38649 // Test that #define'd type aliases work. #define netbsd_gid unsigned int @@ -908,6 +912,9 @@ typedef struct S32579 { unsigned char data[1]; } S32579; enum Enum40494 { X_40494 }; union Union40494 { int x; }; void issue40494(enum Enum40494 e, union Union40494* up) {} + +// Issue 45451, bad handling of go:notinheap types. +typedef struct issue45451Undefined issue45451; */ import "C" @@ -920,6 +927,7 @@ import ( "os/signal" "reflect" "runtime" + "runtime/cgo" "sync" "syscall" "testing" @@ -2230,6 +2238,23 @@ func test32579(t *testing.T) { } } +// issue 37033, check if cgo.Handle works properly + +func testHandle(t *testing.T) { + ch := make(chan int) + + for i := 0; i < 42; i++ { + h := cgo.NewHandle(ch) + go func() { + C.cFunc37033(C.uintptr_t(h)) + }() + if v := <-ch; issue37033 != v { + t.Fatalf("unexpected receiving value: got %d, want %d", v, issue37033) + } + h.Delete() + } +} + // issue 38649 var issue38649 C.netbsd_gid = 42 @@ -2244,3 +2269,19 @@ var issue39877 *C.void = nil func Issue40494() { C.issue40494(C.enum_Enum40494(C.X_40494), (*C.union_Union40494)(nil)) } + +// Issue 45451. 
+func test45451(t *testing.T) { + var u *C.issue45451 + typ := reflect.ValueOf(u).Type().Elem() + + // The type is undefined in C so allocating it should panic. + defer func() { + if r := recover(); r == nil { + t.Error("expected panic") + } + }() + + _ = reflect.New(typ) + t.Errorf("reflect.New(%v) should have panicked", typ) +} diff --git a/misc/cgo/test/testdata/issue9400/asm_386.s b/misc/cgo/test/testdata/issue9400/asm_386.s index 7f158b5c39d..96b8b60c10f 100644 --- a/misc/cgo/test/testdata/issue9400/asm_386.s +++ b/misc/cgo/test/testdata/issue9400/asm_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testdata/issue9400/asm_amd64x.s b/misc/cgo/test/testdata/issue9400/asm_amd64x.s index 48b86190a59..99509bce5e1 100644 --- a/misc/cgo/test/testdata/issue9400/asm_amd64x.s +++ b/misc/cgo/test/testdata/issue9400/asm_amd64x.s @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build amd64 amd64p32 -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testdata/issue9400/asm_arm.s b/misc/cgo/test/testdata/issue9400/asm_arm.s index 96c278520f3..cc92856c2ff 100644 --- a/misc/cgo/test/testdata/issue9400/asm_arm.s +++ b/misc/cgo/test/testdata/issue9400/asm_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testdata/issue9400/asm_arm64.s b/misc/cgo/test/testdata/issue9400/asm_arm64.s index 2ebbfcca3b6..2565793f9ab 100644 --- a/misc/cgo/test/testdata/issue9400/asm_arm64.s +++ b/misc/cgo/test/testdata/issue9400/asm_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testdata/issue9400/asm_mips64x.s b/misc/cgo/test/testdata/issue9400/asm_mips64x.s index 63dc90605e6..693231ddfe1 100644 --- a/misc/cgo/test/testdata/issue9400/asm_mips64x.s +++ b/misc/cgo/test/testdata/issue9400/asm_mips64x.s @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build mips64 mips64le -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testdata/issue9400/asm_mipsx.s b/misc/cgo/test/testdata/issue9400/asm_mipsx.s index 7a927351942..63261bbf9d0 100644 --- a/misc/cgo/test/testdata/issue9400/asm_mipsx.s +++ b/misc/cgo/test/testdata/issue9400/asm_mipsx.s @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build mips mipsle -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testdata/issue9400/asm_ppc64x.s b/misc/cgo/test/testdata/issue9400/asm_ppc64x.s index c88ec3b21e7..b5613fb6ec7 100644 --- a/misc/cgo/test/testdata/issue9400/asm_ppc64x.s +++ b/misc/cgo/test/testdata/issue9400/asm_ppc64x.s @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build ppc64 ppc64le -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testdata/issue9400/asm_riscv64.s b/misc/cgo/test/testdata/issue9400/asm_riscv64.s index 20fcc0066d6..244dadac350 100644 --- a/misc/cgo/test/testdata/issue9400/asm_riscv64.s +++ b/misc/cgo/test/testdata/issue9400/asm_riscv64.s @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. 
// +build riscv64 -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testdata/issue9400/asm_s390x.s b/misc/cgo/test/testdata/issue9400/asm_s390x.s index fc9ad724c15..4856492958b 100644 --- a/misc/cgo/test/testdata/issue9400/asm_s390x.s +++ b/misc/cgo/test/testdata/issue9400/asm_s390x.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/test/testx.c b/misc/cgo/test/testx.c new file mode 100644 index 00000000000..1258e326a41 --- /dev/null +++ b/misc/cgo/test/testx.c @@ -0,0 +1,24 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "_cgo_export.h" + +void lockOSThreadC(void) { + lockOSThreadCallback(); +} + +void issue7978c(uint32_t *sync) { + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 0) + ; + __atomic_add_fetch(sync, 1, __ATOMIC_SEQ_CST); + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 2) + ; + issue7978cb(); + __atomic_add_fetch(sync, 1, __ATOMIC_SEQ_CST); + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 6) + ; +} + +void f7665(void) { +} diff --git a/misc/cgo/test/testx.go b/misc/cgo/test/testx.go index 2b2e69ec00f..823c3e13d29 100644 --- a/misc/cgo/test/testx.go +++ b/misc/cgo/test/testx.go @@ -12,6 +12,7 @@ package cgotest import ( "runtime" + "runtime/cgo" "runtime/debug" "strings" "sync" @@ -26,7 +27,6 @@ import ( extern void doAdd(int, int); // issue 1328 -extern void BackIntoGo(void); void IntoC(void); // issue 1560 @@ -38,11 +38,7 @@ long long mysleep(int seconds); long long twoSleep(int); // issue 3775 -void lockOSThreadCallback(void); -inline static void lockOSThreadC(void) -{ - lockOSThreadCallback(); -} +void lockOSThreadC(void); int usleep(unsigned usec); // issue 4054 part 2 - part 1 in test.go @@ -81,21 +77,9 @@ extern void f7665(void); #include -void issue7978cb(void); - // use ugly atomic variable sync since that doesn't require calling back into // Go code or OS dependencies -static void issue7978c(uint32_t *sync) { - while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 0) - ; - __atomic_add_fetch(sync, 1, __ATOMIC_SEQ_CST); - while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 2) - ; - issue7978cb(); - __atomic_add_fetch(sync, 1, __ATOMIC_SEQ_CST); - while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 6) - ; -} +void issue7978c(uint32_t *sync); // issue 8331 part 2 - part 1 in test.go // A typedef of an unnamed struct is the same struct when @@ -428,9 +412,6 @@ func test6907Go(t *testing.T) { // issue 7665 -//export f7665 -func f7665() {} - var bad7665 unsafe.Pointer = C.f7665 var good7665 uintptr = uintptr(C.f7665) @@ -558,6 +539,17 @@ func test31891(t *testing.T) { C.callIssue31891() } +// issue 37033, check if cgo.Handle works properly + +var issue37033 = 42 + +//export GoFunc37033 +func GoFunc37033(handle C.uintptr_t) { + h := cgo.Handle(handle) + ch := h.Value().(chan int) + ch <- issue37033 +} + // issue 38408 // A typedef pointer can be used as the element type. // No runtime test; just make sure it compiles. 
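The testHandle/GoFunc37033 hunks above exercise runtime/cgo.Handle, the API added in Go 1.17 for passing a Go value to C and back as an opaque, uintptr-sized handle (instead of passing a Go pointer, which the cgo pointer rules forbid). A minimal Go-only sketch of the handle lifecycle follows; the channel payload is illustrative, and in the real test the handle value crosses the C boundary through cFunc37033. Building it requires a cgo-enabled toolchain, since it links runtime/cgo.

package main

import (
	"fmt"
	"runtime/cgo"
)

func main() {
	ch := make(chan int, 1)
	h := cgo.NewHandle(ch) // h is uintptr-sized and safe to hand to C
	// ...uintptr(h) would be passed through a C call here...
	ch2 := cgo.Handle(h).Value().(chan int) // recover the original Go value
	ch2 <- 42
	fmt.Println(<-ch) // prints 42: both sides see the same channel
	h.Delete()        // each handle must be deleted exactly once
}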
diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go index 6a5adf79ca0..55be3c5f707 100644 --- a/misc/cgo/testcarchive/carchive_test.go +++ b/misc/cgo/testcarchive/carchive_test.go @@ -10,7 +10,6 @@ import ( "debug/elf" "flag" "fmt" - "io/ioutil" "log" "os" "os/exec" @@ -53,7 +52,7 @@ func testMain(m *testing.M) int { // We need a writable GOPATH in which to run the tests. // Construct one in a temporary directory. var err error - GOPATH, err = ioutil.TempDir("", "carchive_test") + GOPATH, err = os.MkdirTemp("", "carchive_test") if err != nil { log.Panic(err) } @@ -74,7 +73,7 @@ func testMain(m *testing.M) int { log.Panic(err) } os.Setenv("PWD", modRoot) - if err := ioutil.WriteFile("go.mod", []byte("module testcarchive\n"), 0666); err != nil { + if err := os.WriteFile("go.mod", []byte("module testcarchive\n"), 0666); err != nil { log.Panic(err) } @@ -176,7 +175,7 @@ func genHeader(t *testing.T, header, dir string) { // The 'cgo' command generates a number of additional artifacts, // but we're only interested in the header. // Shunt the rest of the outputs to a temporary directory. - objDir, err := ioutil.TempDir(GOPATH, "_obj") + objDir, err := os.MkdirTemp(GOPATH, "_obj") if err != nil { t.Fatal(err) } @@ -252,7 +251,7 @@ var badLineRegexp = regexp.MustCompile(`(?m)^#line [0-9]+ "/.*$`) // the user and make the files change based on details of the location // of GOPATH. func checkLineComments(t *testing.T, hdrname string) { - hdr, err := ioutil.ReadFile(hdrname) + hdr, err := os.ReadFile(hdrname) if err != nil { if !os.IsNotExist(err) { t.Error(err) @@ -618,7 +617,7 @@ func TestExtar(t *testing.T) { t.Fatal(err) } s := strings.Replace(testar, "PWD", dir, 1) - if err := ioutil.WriteFile("testar", []byte(s), 0777); err != nil { + if err := os.WriteFile("testar", []byte(s), 0777); err != nil { t.Fatal(err) } @@ -776,7 +775,7 @@ func TestSIGPROF(t *testing.T) { // tool with -buildmode=c-archive, it passes -shared to the compiler, // so we override that. The go tool doesn't work this way, but Bazel // will likely do it in the future. And it ought to work. This test -// was added because at one time it did not work on PPC GNU/Linux. +// was added because at one time it did not work on PPC Linux. func TestCompileWithoutShared(t *testing.T) { // For simplicity, reuse the signal forwarding test. checkSignalForwardingTest(t) diff --git a/misc/cgo/testcarchive/testdata/libgo6/sigprof.go b/misc/cgo/testcarchive/testdata/libgo6/sigprof.go index 4cb05dc6178..31527c59af1 100644 --- a/misc/cgo/testcarchive/testdata/libgo6/sigprof.go +++ b/misc/cgo/testcarchive/testdata/libgo6/sigprof.go @@ -5,7 +5,7 @@ package main import ( - "io/ioutil" + "io" "runtime/pprof" ) @@ -13,7 +13,7 @@ import "C" //export go_start_profile func go_start_profile() { - pprof.StartCPUProfile(ioutil.Discard) + pprof.StartCPUProfile(io.Discard) } //export go_stop_profile diff --git a/misc/cgo/testcarchive/testdata/main_unix.c b/misc/cgo/testcarchive/testdata/main_unix.c index b23ac1c2428..bd00f9d2339 100644 --- a/misc/cgo/testcarchive/testdata/main_unix.c +++ b/misc/cgo/testcarchive/testdata/main_unix.c @@ -36,7 +36,7 @@ int install_handler() { return 2; } // gccgo does not set SA_ONSTACK for SIGSEGV. 
- if (getenv("GCCGO") == "" && (osa.sa_flags&SA_ONSTACK) == 0) { + if (getenv("GCCGO") == NULL && (osa.sa_flags&SA_ONSTACK) == 0) { fprintf(stderr, "Go runtime did not install signal handler\n"); return 2; } diff --git a/misc/cgo/testcshared/cshared_test.go b/misc/cgo/testcshared/cshared_test.go index 3a4886cf30a..90d8c365e6d 100644 --- a/misc/cgo/testcshared/cshared_test.go +++ b/misc/cgo/testcshared/cshared_test.go @@ -11,7 +11,6 @@ import ( "encoding/binary" "flag" "fmt" - "io/ioutil" "log" "os" "os/exec" @@ -125,7 +124,7 @@ func testMain(m *testing.M) int { // Copy testdata into GOPATH/src/testcshared, along with a go.mod file // declaring the same path. - GOPATH, err := ioutil.TempDir("", "cshared_test") + GOPATH, err := os.MkdirTemp("", "cshared_test") if err != nil { log.Panic(err) } @@ -140,7 +139,7 @@ func testMain(m *testing.M) int { log.Panic(err) } os.Setenv("PWD", modRoot) - if err := ioutil.WriteFile("go.mod", []byte("module testcshared\n"), 0666); err != nil { + if err := os.WriteFile("go.mod", []byte("module testcshared\n"), 0666); err != nil { log.Panic(err) } @@ -260,7 +259,7 @@ func createHeaders() error { // The 'cgo' command generates a number of additional artifacts, // but we're only interested in the header. // Shunt the rest of the outputs to a temporary directory. - objDir, err := ioutil.TempDir("", "testcshared_obj") + objDir, err := os.MkdirTemp("", "testcshared_obj") if err != nil { return err } @@ -381,7 +380,7 @@ func main() { srcfile := filepath.Join(tmpdir, "test.go") objfile := filepath.Join(tmpdir, "test.dll") - if err := ioutil.WriteFile(srcfile, []byte(prog), 0666); err != nil { + if err := os.WriteFile(srcfile, []byte(prog), 0666); err != nil { t.Fatal(err) } argv := []string{"build", "-buildmode=c-shared"} @@ -643,7 +642,7 @@ func TestPIE(t *testing.T) { // Test that installing a second time recreates the header file. func TestCachedInstall(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "cshared") + tmpdir, err := os.MkdirTemp("", "cshared") if err != nil { t.Fatal(err) } @@ -719,14 +718,14 @@ func TestCachedInstall(t *testing.T) { // copyFile copies src to dst. 
func copyFile(t *testing.T, dst, src string) { t.Helper() - data, err := ioutil.ReadFile(src) + data, err := os.ReadFile(src) if err != nil { t.Fatal(err) } if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(dst, data, 0666); err != nil { + if err := os.WriteFile(dst, data, 0666); err != nil { t.Fatal(err) } } @@ -743,7 +742,7 @@ func TestGo2C2Go(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "cshared-TestGo2C2Go") + tmpdir, err := os.MkdirTemp("", "cshared-TestGo2C2Go") if err != nil { t.Fatal(err) } diff --git a/misc/cgo/testgodefs/testgodefs_test.go b/misc/cgo/testgodefs/testgodefs_test.go index 4c2312c1c89..aae34043605 100644 --- a/misc/cgo/testgodefs/testgodefs_test.go +++ b/misc/cgo/testgodefs/testgodefs_test.go @@ -6,7 +6,6 @@ package testgodefs import ( "bytes" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -34,7 +33,7 @@ func TestGoDefs(t *testing.T) { t.Fatal(err) } - gopath, err := ioutil.TempDir("", "testgodefs-gopath") + gopath, err := os.MkdirTemp("", "testgodefs-gopath") if err != nil { t.Fatal(err) } @@ -58,20 +57,20 @@ func TestGoDefs(t *testing.T) { t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr) } - if err := ioutil.WriteFile(filepath.Join(dir, fp+"_defs.go"), out, 0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, fp+"_defs.go"), out, 0644); err != nil { t.Fatal(err) } } - main, err := ioutil.ReadFile(filepath.Join("testdata", "main.go")) + main, err := os.ReadFile(filepath.Join("testdata", "main.go")) if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(dir, "main.go"), main, 0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, "main.go"), main, 0644); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(dir, "go.mod"), []byte("module testgodefs\ngo 1.14\n"), 0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module testgodefs\ngo 1.14\n"), 0644); err != nil { t.Fatal(err) } diff --git a/misc/cgo/testplugin/plugin_test.go b/misc/cgo/testplugin/plugin_test.go index 9055dbda044..28a8c669c07 100644 --- a/misc/cgo/testplugin/plugin_test.go +++ b/misc/cgo/testplugin/plugin_test.go @@ -9,7 +9,6 @@ import ( "context" "flag" "fmt" - "io/ioutil" "log" "os" "os/exec" @@ -31,15 +30,28 @@ func TestMain(m *testing.M) { os.Exit(testMain(m)) } +// tmpDir is used to clean up logged commands -- s/tmpDir/$TMPDIR/ +var tmpDir string + +// prettyPrintf prints lines with tmpDir sanitized. +func prettyPrintf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + if tmpDir != "" { + s = strings.ReplaceAll(s, tmpDir, "$TMPDIR") + } + fmt.Print(s) +} + func testMain(m *testing.M) int { // Copy testdata into GOPATH/src/testplugin, along with a go.mod file // declaring the same path. 
- GOPATH, err := ioutil.TempDir("", "plugin_test") + GOPATH, err := os.MkdirTemp("", "plugin_test") if err != nil { log.Panic(err) } defer os.RemoveAll(GOPATH) + tmpDir = GOPATH modRoot := filepath.Join(GOPATH, "src", "testplugin") altRoot := filepath.Join(GOPATH, "alt", "src", "testplugin") @@ -50,14 +62,20 @@ func testMain(m *testing.M) int { if err := overlayDir(dstRoot, srcRoot); err != nil { log.Panic(err) } - if err := ioutil.WriteFile(filepath.Join(dstRoot, "go.mod"), []byte("module testplugin\n"), 0666); err != nil { + prettyPrintf("mkdir -p %s\n", dstRoot) + prettyPrintf("rsync -a %s/ %s\n", srcRoot, dstRoot) + + if err := os.WriteFile(filepath.Join(dstRoot, "go.mod"), []byte("module testplugin\n"), 0666); err != nil { log.Panic(err) } + prettyPrintf("echo 'module testplugin' > %s/go.mod\n", dstRoot) } os.Setenv("GOPATH", filepath.Join(GOPATH, "alt")) if err := os.Chdir(altRoot); err != nil { log.Panic(err) + } else { + prettyPrintf("cd %s\n", altRoot) } os.Setenv("PWD", altRoot) goCmd(nil, "build", "-buildmode=plugin", "-o", filepath.Join(modRoot, "plugin-mismatch.so"), "./plugin-mismatch") @@ -65,6 +83,8 @@ func testMain(m *testing.M) int { os.Setenv("GOPATH", GOPATH) if err := os.Chdir(modRoot); err != nil { log.Panic(err) + } else { + prettyPrintf("cd %s\n", modRoot) } os.Setenv("PWD", modRoot) @@ -72,13 +92,14 @@ func testMain(m *testing.M) int { goCmd(nil, "build", "-buildmode=plugin", "./plugin1") goCmd(nil, "build", "-buildmode=plugin", "./plugin2") - so, err := ioutil.ReadFile("plugin2.so") + so, err := os.ReadFile("plugin2.so") if err != nil { log.Panic(err) } - if err := ioutil.WriteFile("plugin2-dup.so", so, 0444); err != nil { + if err := os.WriteFile("plugin2-dup.so", so, 0444); err != nil { log.Panic(err) } + prettyPrintf("cp plugin2.so plugin2-dup.so\n") goCmd(nil, "build", "-buildmode=plugin", "-o=sub/plugin1.so", "./sub/plugin1") goCmd(nil, "build", "-buildmode=plugin", "-o=unnamed1.so", "./unnamed1/main.go") @@ -95,8 +116,53 @@ func goCmd(t *testing.T, op string, args ...string) { run(t, "go", append([]string{op, "-gcflags", gcflags}, args...)...) } +// escape converts a string to something suitable for a shell command line. +func escape(s string) string { + s = strings.Replace(s, "\\", "\\\\", -1) + s = strings.Replace(s, "'", "\\'", -1) + // Conservative guess at characters that will force quoting + if s == "" || strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") { + s = "'" + s + "'" + } + return s +} + +// asCommandLine renders cmd as something that could be copy-and-pasted into a command line +func asCommandLine(cwd string, cmd *exec.Cmd) string { + s := "(" + if cmd.Dir != "" && cmd.Dir != cwd { + s += "cd " + escape(cmd.Dir) + "; " + } + for _, e := range cmd.Env { + if !strings.HasPrefix(e, "PATH=") && + !strings.HasPrefix(e, "HOME=") && + !strings.HasPrefix(e, "USER=") && + !strings.HasPrefix(e, "SHELL=") { + s += " " + s += escape(e) + } + } + // These environment variables are relevant to this test. + for _, e := range os.Environ() { + if strings.HasPrefix(e, "PWD=") || + strings.HasPrefix(e, "GOPATH=") || + strings.HasPrefix(e, "LD_LIBRARY_PATH=") { + s += " " + s += escape(e) + } + } + for _, a := range cmd.Args { + s += " " + s += escape(a) + } + s += " )" + return s +} + func run(t *testing.T, bin string, args ...string) string { cmd := exec.Command(bin, args...) 
+ cmdLine := asCommandLine(".", cmd) + prettyPrintf("%s\n", cmdLine) cmd.Stderr = new(strings.Builder) out, err := cmd.Output() if err != nil { @@ -201,12 +267,18 @@ func TestMethod(t *testing.T) { // Exported symbol's method must be live. goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./method/plugin.go") goCmd(t, "build", "-o", "method.exe", "./method/main.go") - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, "./method.exe") - out, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, out) - } + run(t, "./method.exe") +} + +func TestMethod2(t *testing.T) { + goCmd(t, "build", "-buildmode=plugin", "-o", "method2.so", "./method2/plugin.go") + goCmd(t, "build", "-o", "method2.exe", "./method2/main.go") + run(t, "./method2.exe") +} + +func TestIssue44956(t *testing.T) { + goCmd(t, "build", "-buildmode=plugin", "-o", "issue44956p1.so", "./issue44956/plugin1.go") + goCmd(t, "build", "-buildmode=plugin", "-o", "issue44956p2.so", "./issue44956/plugin2.go") + goCmd(t, "build", "-o", "issue44956.exe", "./issue44956/main.go") + run(t, "./issue44956.exe") } diff --git a/misc/cgo/testplugin/testdata/issue44956/base/base.go b/misc/cgo/testplugin/testdata/issue44956/base/base.go new file mode 100644 index 00000000000..609aa0dff4e --- /dev/null +++ b/misc/cgo/testplugin/testdata/issue44956/base/base.go @@ -0,0 +1,7 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +var X = &map[int]int{123: 456} diff --git a/misc/cgo/testplugin/testdata/issue44956/main.go b/misc/cgo/testplugin/testdata/issue44956/main.go new file mode 100644 index 00000000000..287a60585e0 --- /dev/null +++ b/misc/cgo/testplugin/testdata/issue44956/main.go @@ -0,0 +1,47 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 44956: writable static temp is not exported correctly. +// In the test below, package base is +// +// X = &map{...} +// +// which compiles to +// +// X = &stmp // static +// stmp = makemap(...) // in init function +// +// plugin1 and plugin2 both import base. plugin1 doesn't use +// base.X, so that symbol is deadcoded in plugin1. +// +// plugin1 is loaded first. base.init runs at that point, which +// initializes base.stmp. +// +// plugin2 is then loaded. base.init already ran, so it doesn't run +// again. When base.stmp is not exported, plugin2's base.X points to +// its own private base.stmp, which is not initialized, and the test fails. + +package main + +import "plugin" + +func main() { + _, err := plugin.Open("issue44956p1.so") + if err != nil { + panic("FAIL") + } + + p2, err := plugin.Open("issue44956p2.so") + if err != nil { + panic("FAIL") + } + f, err := p2.Lookup("F") + if err != nil { + panic("FAIL") + } + x := f.(func() *map[int]int)() + if x == nil || (*x)[123] != 456 { + panic("FAIL") + } +} diff --git a/misc/cgo/testplugin/testdata/issue44956/plugin1.go b/misc/cgo/testplugin/testdata/issue44956/plugin1.go new file mode 100644 index 00000000000..499fa31abf8 --- /dev/null +++ b/misc/cgo/testplugin/testdata/issue44956/plugin1.go @@ -0,0 +1,9 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import _ "testplugin/issue44956/base" + +func main() {} diff --git a/test/abi/regabipragma.go b/misc/cgo/testplugin/testdata/issue44956/plugin2.go similarity index 56% rename from test/abi/regabipragma.go rename to misc/cgo/testplugin/testdata/issue44956/plugin2.go index e7ecd58fc89..a73542ca716 100644 --- a/test/abi/regabipragma.go +++ b/misc/cgo/testplugin/testdata/issue44956/plugin2.go @@ -1,10 +1,11 @@ -// runindir -gcflags=-c=1 -// +build !windows - // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TODO May delete or adapt this test once regabi is the default +package main -package ignore +import "testplugin/issue44956/base" + +func F() *map[int]int { return base.X } + +func main() {} diff --git a/misc/cgo/testplugin/testdata/method2/main.go b/misc/cgo/testplugin/testdata/method2/main.go new file mode 100644 index 00000000000..89afbda3d47 --- /dev/null +++ b/misc/cgo/testplugin/testdata/method2/main.go @@ -0,0 +1,32 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A type can be passed to a plugin and converted to an interface +// there. So its methods need to be live. + +package main + +import ( + "plugin" + + "testplugin/method2/p" +) + +var t p.T + +type I interface{ M() } + +func main() { + pl, err := plugin.Open("method2.so") + if err != nil { + panic(err) + } + + f, err := pl.Lookup("F") + if err != nil { + panic(err) + } + + f.(func(p.T) interface{})(t).(I).M() +} diff --git a/misc/cgo/testplugin/testdata/method2/p/p.go b/misc/cgo/testplugin/testdata/method2/p/p.go new file mode 100644 index 00000000000..acb526acec9 --- /dev/null +++ b/misc/cgo/testplugin/testdata/method2/p/p.go @@ -0,0 +1,9 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type T int + +func (T) M() { println("M") } diff --git a/misc/cgo/testplugin/testdata/method2/plugin.go b/misc/cgo/testplugin/testdata/method2/plugin.go new file mode 100644 index 00000000000..6198e7648ee --- /dev/null +++ b/misc/cgo/testplugin/testdata/method2/plugin.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "testplugin/method2/p" + +func main() {} + +func F(t p.T) interface{} { return t } diff --git a/misc/cgo/testsanitizers/cc_test.go b/misc/cgo/testsanitizers/cc_test.go index 0192a663ddd..384b6250e1e 100644 --- a/misc/cgo/testsanitizers/cc_test.go +++ b/misc/cgo/testsanitizers/cc_test.go @@ -11,7 +11,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -36,7 +35,7 @@ func requireOvercommit(t *testing.T) { overcommit.Once.Do(func() { var out []byte - out, overcommit.err = ioutil.ReadFile("/proc/sys/vm/overcommit_memory") + out, overcommit.err = os.ReadFile("/proc/sys/vm/overcommit_memory") if overcommit.err != nil { return } @@ -313,14 +312,14 @@ int main() { `) func (c *config) checkCSanitizer() (skip bool, err error) { - dir, err := ioutil.TempDir("", c.sanitizer) + dir, err := os.MkdirTemp("", c.sanitizer) if err != nil { return false, fmt.Errorf("failed to create temp directory: %v", err) } defer os.RemoveAll(dir) src := filepath.Join(dir, "return0.c") - if err := ioutil.WriteFile(src, cMain, 0600); err != nil { + if err := os.WriteFile(src, cMain, 0600); err != nil { return false, fmt.Errorf("failed to write C source file: %v", err) } @@ -418,7 +417,7 @@ func (d *tempDir) Join(name string) string { func newTempDir(t *testing.T) *tempDir { t.Helper() - dir, err := ioutil.TempDir("", filepath.Dir(t.Name())) + dir, err := os.MkdirTemp("", filepath.Dir(t.Name())) if err != nil { t.Fatalf("Failed to create temp dir: %v", err) } @@ -440,3 +439,14 @@ func hangProneCmd(name string, arg ...string) *exec.Cmd { } return cmd } + +// mSanSupported is a copy of the function cmd/internal/sys.MSanSupported, +// because the internal package can't be used here. +func mSanSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "arm64" + default: + return false + } +} diff --git a/misc/cgo/testsanitizers/cshared_test.go b/misc/cgo/testsanitizers/cshared_test.go index 56063ea6201..8fd03715a11 100644 --- a/misc/cgo/testsanitizers/cshared_test.go +++ b/misc/cgo/testsanitizers/cshared_test.go @@ -6,7 +6,7 @@ package sanitizers_test import ( "fmt" - "io/ioutil" + "os" "strings" "testing" ) @@ -19,6 +19,12 @@ func TestShared(t *testing.T) { if err != nil { t.Fatal(err) } + + GOARCH, err := goEnv("GOARCH") + if err != nil { + t.Fatal(err) + } + libExt := "so" if GOOS == "darwin" { libExt = "dylib" @@ -41,6 +47,11 @@ func TestShared(t *testing.T) { for _, tc := range cases { tc := tc name := strings.TrimSuffix(tc.src, ".go") + // The memory sanitizer tests require support for the -msan option. 
+ if tc.sanitizer == "memory" && !mSanSupported(GOOS, GOARCH) { + t.Logf("skipping %s test on %s/%s; -msan option is not supported.", name, GOOS, GOARCH) + continue + } t.Run(name, func(t *testing.T) { t.Parallel() config := configure(tc.sanitizer) @@ -53,7 +64,7 @@ func TestShared(t *testing.T) { mustRun(t, config.goCmd("build", "-buildmode=c-shared", "-o", lib, srcPath(tc.src))) cSrc := dir.Join("main.c") - if err := ioutil.WriteFile(cSrc, cMain, 0600); err != nil { + if err := os.WriteFile(cSrc, cMain, 0600); err != nil { t.Fatalf("failed to write C source file: %v", err) } diff --git a/misc/cgo/testsanitizers/msan_test.go b/misc/cgo/testsanitizers/msan_test.go index 5e2f9759bae..2a3494fbfc1 100644 --- a/misc/cgo/testsanitizers/msan_test.go +++ b/misc/cgo/testsanitizers/msan_test.go @@ -10,6 +10,19 @@ import ( ) func TestMSAN(t *testing.T) { + goos, err := goEnv("GOOS") + if err != nil { + t.Fatal(err) + } + goarch, err := goEnv("GOARCH") + if err != nil { + t.Fatal(err) + } + // The msan tests require support for the -msan option. + if !mSanSupported(goos, goarch) { + t.Skipf("skipping on %s/%s; -msan option is not supported.", goos, goarch) + } + t.Parallel() requireOvercommit(t) config := configure("memory") diff --git a/misc/cgo/testsanitizers/testdata/tsan9.go b/misc/cgo/testsanitizers/testdata/tsan9.go index f166d8b495a..06304be751b 100644 --- a/misc/cgo/testsanitizers/testdata/tsan9.go +++ b/misc/cgo/testsanitizers/testdata/tsan9.go @@ -44,7 +44,7 @@ void spin() { import "C" import ( - "io/ioutil" + "io" "runtime/pprof" "time" ) @@ -60,7 +60,7 @@ func goSpin() { } func main() { - pprof.StartCPUProfile(ioutil.Discard) + pprof.StartCPUProfile(io.Discard) go C.spin() goSpin() pprof.StopCPUProfile() diff --git a/misc/cgo/testshared/shared_test.go b/misc/cgo/testshared/shared_test.go index f52391c6f6c..e77f8489154 100644 --- a/misc/cgo/testshared/shared_test.go +++ b/misc/cgo/testshared/shared_test.go @@ -13,7 +13,6 @@ import ( "fmt" "go/build" "io" - "io/ioutil" "log" "os" "os/exec" @@ -90,7 +89,7 @@ func goCmd(t *testing.T, args ...string) string { // TestMain calls testMain so that the latter can use defer (TestMain exits with os.Exit). func testMain(m *testing.M) (int, error) { - workDir, err := ioutil.TempDir("", "shared_test") + workDir, err := os.MkdirTemp("", "shared_test") if err != nil { return 0, err } @@ -177,7 +176,7 @@ func cloneTestdataModule(gopath string) (string, error) { if err := overlayDir(modRoot, "testdata"); err != nil { return "", err } - if err := ioutil.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module testshared\n"), 0644); err != nil { + if err := os.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module testshared\n"), 0644); err != nil { return "", err } return modRoot, nil @@ -318,7 +317,7 @@ func TestShlibnameFiles(t *testing.T) { } for _, pkg := range pkgs { shlibnamefile := filepath.Join(gorootInstallDir, pkg+".shlibname") - contentsb, err := ioutil.ReadFile(shlibnamefile) + contentsb, err := os.ReadFile(shlibnamefile) if err != nil { t.Errorf("error reading shlibnamefile for %s: %v", pkg, err) continue @@ -791,7 +790,7 @@ func resetFileStamps() { // It also sets the time of the file, so that we can see if it is rewritten. func touch(t *testing.T, path string) (cleanup func()) { t.Helper() - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { t.Fatal(err) } @@ -837,14 +836,14 @@ func touch(t *testing.T, path string) (cleanup func()) { // user-writable. 
perm := fi.Mode().Perm() | 0200 - if err := ioutil.WriteFile(path, data, perm); err != nil { + if err := os.WriteFile(path, data, perm); err != nil { t.Fatal(err) } if err := os.Chtimes(path, nearlyNew, nearlyNew); err != nil { t.Fatal(err) } return func() { - if err := ioutil.WriteFile(path, old, perm); err != nil { + if err := os.WriteFile(path, old, perm); err != nil { t.Fatal(err) } } diff --git a/misc/cgo/testshared/testdata/depBase/asm.s b/misc/cgo/testshared/testdata/depBase/asm.s index a8acf77f0b9..0f1111f3927 100644 --- a/misc/cgo/testshared/testdata/depBase/asm.s +++ b/misc/cgo/testshared/testdata/depBase/asm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/misc/cgo/testshared/testdata/depBase/stubs.go b/misc/cgo/testshared/testdata/depBase/stubs.go index 04534f38ddd..c77953803bd 100644 --- a/misc/cgo/testshared/testdata/depBase/stubs.go +++ b/misc/cgo/testshared/testdata/depBase/stubs.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc package depBase diff --git a/misc/cgo/testso/so_test.go b/misc/cgo/testso/so_test.go index 57f0fd34f78..2023c51f113 100644 --- a/misc/cgo/testso/so_test.go +++ b/misc/cgo/testso/so_test.go @@ -7,7 +7,6 @@ package so_test import ( - "io/ioutil" "log" "os" "os/exec" @@ -37,7 +36,7 @@ func requireTestSOSupported(t *testing.T) { func TestSO(t *testing.T) { requireTestSOSupported(t) - GOPATH, err := ioutil.TempDir("", "cgosotest") + GOPATH, err := os.MkdirTemp("", "cgosotest") if err != nil { log.Fatal(err) } @@ -47,7 +46,7 @@ func TestSO(t *testing.T) { if err := overlayDir(modRoot, "testdata"); err != nil { log.Panic(err) } - if err := ioutil.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module cgosotest\n"), 0666); err != nil { + if err := os.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module cgosotest\n"), 0666); err != nil { log.Panic(err) } @@ -80,6 +79,10 @@ func TestSO(t *testing.T) { case "windows": ext = "dll" args = append(args, "-DEXPORT_DLL") + // At least in mingw-clang it is not permitted to just name a .dll + // on the command line. You must name the corresponding import + // library instead, even though the dll is used when the executable is run. + args = append(args, "-Wl,-out-implib,libcgosotest.a") case "aix": ext = "so.1" } diff --git a/misc/cgo/testso/testdata/cgoso.go b/misc/cgo/testso/testdata/cgoso.go index bba5de33121..b59b2a8e8b1 100644 --- a/misc/cgo/testso/testdata/cgoso.go +++ b/misc/cgo/testso/testdata/cgoso.go @@ -14,7 +14,7 @@ package cgosotest #cgo solaris LDFLAGS: -L. -lcgosotest #cgo netbsd LDFLAGS: -L. libcgosotest.so #cgo darwin LDFLAGS: -L. libcgosotest.dylib -#cgo windows LDFLAGS: -L. libcgosotest.dll +#cgo windows LDFLAGS: -L. libcgosotest.a #cgo aix LDFLAGS: -L. 
-l cgosotest void init(void); diff --git a/misc/cgo/testsovar/so_test.go b/misc/cgo/testsovar/so_test.go index 57f0fd34f78..2023c51f113 100644 --- a/misc/cgo/testsovar/so_test.go +++ b/misc/cgo/testsovar/so_test.go @@ -7,7 +7,6 @@ package so_test import ( - "io/ioutil" "log" "os" "os/exec" @@ -37,7 +36,7 @@ func requireTestSOSupported(t *testing.T) { func TestSO(t *testing.T) { requireTestSOSupported(t) - GOPATH, err := ioutil.TempDir("", "cgosotest") + GOPATH, err := os.MkdirTemp("", "cgosotest") if err != nil { log.Fatal(err) } @@ -47,7 +46,7 @@ func TestSO(t *testing.T) { if err := overlayDir(modRoot, "testdata"); err != nil { log.Panic(err) } - if err := ioutil.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module cgosotest\n"), 0666); err != nil { + if err := os.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module cgosotest\n"), 0666); err != nil { log.Panic(err) } @@ -80,6 +79,10 @@ func TestSO(t *testing.T) { case "windows": ext = "dll" args = append(args, "-DEXPORT_DLL") + // At least in mingw-clang it is not permitted to just name a .dll + // on the command line. You must name the corresponding import + // library instead, even though the dll is used when the executable is run. + args = append(args, "-Wl,-out-implib,libcgosotest.a") case "aix": ext = "so.1" } diff --git a/misc/cgo/testsovar/testdata/cgoso.go b/misc/cgo/testsovar/testdata/cgoso.go index 9c7f95e92ea..d9deb556da8 100644 --- a/misc/cgo/testsovar/testdata/cgoso.go +++ b/misc/cgo/testsovar/testdata/cgoso.go @@ -18,7 +18,7 @@ package cgosotest #cgo solaris LDFLAGS: -L. -lcgosotest #cgo netbsd LDFLAGS: -L. libcgosotest.so #cgo darwin LDFLAGS: -L. libcgosotest.dylib -#cgo windows LDFLAGS: -L. libcgosotest.dll +#cgo windows LDFLAGS: -L. libcgosotest.a #cgo aix LDFLAGS: -L. -l cgosotest #include "cgoso_c.h" diff --git a/misc/chrome/gophertool/popup.html b/misc/chrome/gophertool/popup.html index 97404062761..ad42a3847c7 100644 --- a/misc/chrome/gophertool/popup.html +++ b/misc/chrome/gophertool/popup.html @@ -15,7 +15,7 @@ pkg id/name:

    Also: buildbots -Github +GitHub diff --git a/misc/ios/detect.go b/misc/ios/detect.go index d32bcc3202a..cde57238923 100644 --- a/misc/ios/detect.go +++ b/misc/ios/detect.go @@ -16,7 +16,6 @@ import ( "bytes" "crypto/x509" "fmt" - "io/ioutil" "os" "os/exec" "strings" @@ -38,7 +37,7 @@ func main() { fmt.Println("# will be overwritten when running Go programs.") for _, mp := range mps { fmt.Println() - f, err := ioutil.TempFile("", "go_ios_detect_") + f, err := os.CreateTemp("", "go_ios_detect_") check(err) fname := f.Name() defer os.Remove(fname) diff --git a/misc/ios/go_ios_exec.go b/misc/ios/go_ios_exec.go index 0acf1b259c0..9e63717d921 100644 --- a/misc/ios/go_ios_exec.go +++ b/misc/ios/go_ios_exec.go @@ -26,7 +26,6 @@ import ( "fmt" "go/build" "io" - "io/ioutil" "log" "net" "os" @@ -79,7 +78,7 @@ func main() { func runMain() (int, error) { var err error - tmpdir, err = ioutil.TempDir("", "go_ios_exec_") + tmpdir, err = os.MkdirTemp("", "go_ios_exec_") if err != nil { return 1, err } @@ -205,13 +204,13 @@ func assembleApp(appdir, bin string) error { } entitlementsPath := filepath.Join(tmpdir, "Entitlements.plist") - if err := ioutil.WriteFile(entitlementsPath, []byte(entitlementsPlist()), 0744); err != nil { + if err := os.WriteFile(entitlementsPath, []byte(entitlementsPlist()), 0744); err != nil { return err } - if err := ioutil.WriteFile(filepath.Join(appdir, "Info.plist"), []byte(infoPlist(pkgpath)), 0744); err != nil { + if err := os.WriteFile(filepath.Join(appdir, "Info.plist"), []byte(infoPlist(pkgpath)), 0744); err != nil { return err } - if err := ioutil.WriteFile(filepath.Join(appdir, "ResourceRules.plist"), []byte(resourceRules), 0744); err != nil { + if err := os.WriteFile(filepath.Join(appdir, "ResourceRules.plist"), []byte(resourceRules), 0744); err != nil { return err } return nil diff --git a/misc/linkcheck/linkcheck.go b/misc/linkcheck/linkcheck.go index d9bfd2f767e..570b430da4f 100644 --- a/misc/linkcheck/linkcheck.go +++ b/misc/linkcheck/linkcheck.go @@ -11,7 +11,7 @@ import ( "errors" "flag" "fmt" - "io/ioutil" + "io" "log" "net/http" "os" @@ -144,7 +144,7 @@ func doCrawl(url string) error { if res.StatusCode != 200 { return errors.New(res.Status) } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) res.Body.Close() if err != nil { log.Fatalf("Error reading %s body: %v", url, err) diff --git a/misc/reboot/experiment_toolid_test.go b/misc/reboot/experiment_toolid_test.go index eabf06b19ee..4f40284d80f 100644 --- a/misc/reboot/experiment_toolid_test.go +++ b/misc/reboot/experiment_toolid_test.go @@ -13,7 +13,6 @@ package reboot_test import ( "bytes" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -23,7 +22,7 @@ import ( func TestExperimentToolID(t *testing.T) { // Set up GOROOT - goroot, err := ioutil.TempDir("", "experiment-goroot") + goroot, err := os.MkdirTemp("", "experiment-goroot") if err != nil { t.Fatal(err) } @@ -34,13 +33,13 @@ func TestExperimentToolID(t *testing.T) { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(goroot, "VERSION"), []byte("go1.999"), 0666); err != nil { + if err := os.WriteFile(filepath.Join(goroot, "VERSION"), []byte("go1.999"), 0666); err != nil { t.Fatal(err) } env := append(os.Environ(), "GOROOT=", "GOROOT_BOOTSTRAP="+runtime.GOROOT()) // Use a clean cache. 
- gocache, err := ioutil.TempDir("", "experiment-gocache") + gocache, err := os.MkdirTemp("", "experiment-gocache") if err != nil { t.Fatal(err) } diff --git a/misc/reboot/reboot_test.go b/misc/reboot/reboot_test.go index 717c0fb7096..6bafc608b5e 100644 --- a/misc/reboot/reboot_test.go +++ b/misc/reboot/reboot_test.go @@ -7,7 +7,6 @@ package reboot_test import ( - "io/ioutil" "os" "os/exec" "path/filepath" @@ -16,7 +15,7 @@ import ( ) func TestRepeatBootstrap(t *testing.T) { - goroot, err := ioutil.TempDir("", "reboot-goroot") + goroot, err := os.MkdirTemp("", "reboot-goroot") if err != nil { t.Fatal(err) } @@ -27,7 +26,7 @@ func TestRepeatBootstrap(t *testing.T) { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(goroot, "VERSION"), []byte(runtime.Version()), 0666); err != nil { + if err := os.WriteFile(filepath.Join(goroot, "VERSION"), []byte(runtime.Version()), 0666); err != nil { t.Fatal(err) } diff --git a/misc/trace/trace_viewer_full.html b/misc/trace/trace_viewer_full.html index ef2e0ea5733..ae6e35fca22 100644 --- a/misc/trace/trace_viewer_full.html +++ b/misc/trace/trace_viewer_full.html @@ -993,13 +993,13 @@
    - X no feedback
    - 0 uninitialized
    - . premonomorphic
    - 1 monomorphic
    - ^ recompute handler
    - P polymorphic
    - N megamorphic
    + X no feedback
    + 0 uninitialized
    + . premonomorphic
    + 1 monomorphic
    + ^ recompute handler
    + P polymorphic
    + N megamorphic
    G generic
    @@ -3596,7 +3596,7 @@
    Graphics Pipeline and Raster Tasks
    - When raster tasks are completed in comparison to the rest of the graphics pipeline.
    + When raster tasks are completed in comparison to the rest of the graphics pipeline.
    Only pages where raster tasks are completed after beginFrame is issued are included.
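The dominant mechanical change throughout the misc/ hunks above is the migration off the deprecated io/ioutil package onto its io and os replacements, available since Go 1.16. A short reference sketch of the mapping; the file and directory names here are placeholders, not part of the patch:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	dir, err := os.MkdirTemp("", "demo") // was ioutil.TempDir
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	path := dir + "/f.txt"
	if err := os.WriteFile(path, []byte("x"), 0666); err != nil { // was ioutil.WriteFile
		panic(err)
	}
	data, err := os.ReadFile(path) // was ioutil.ReadFile
	if err != nil {
		panic(err)
	}

	f, err := os.CreateTemp(dir, "t-*") // was ioutil.TempFile
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if _, err := io.Copy(io.Discard, f); err != nil { // io.Discard/io.ReadAll replace ioutil.Discard/ioutil.ReadAll
		panic(err)
	}

	entries, err := os.ReadDir(dir) // was ioutil.ReadDir; note it returns []fs.DirEntry, not []fs.FileInfo
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data), len(entries))
}

The one mapping that is not drop-in is ioutil.ReadDir: os.ReadDir returns []fs.DirEntry, so callers that needed sizes or times must call Entry.Info(), while callers that only use Name(), as in the compareStatus hunk above, port unchanged.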
    diff --git a/misc/wasm/wasm_exec.js b/misc/wasm/wasm_exec.js index 82041e6bb90..3e41e628ef9 100644 --- a/misc/wasm/wasm_exec.js +++ b/misc/wasm/wasm_exec.js @@ -296,8 +296,8 @@ setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000); }, - // func walltime1() (sec int64, nsec int32) - "runtime.walltime1": (sp) => { + // func walltime() (sec int64, nsec int32) + "runtime.walltime": (sp) => { sp >>>= 0; const msec = (new Date).getTime(); setInt64(sp + 8, msec / 1000); diff --git a/robots.txt b/robots.txt deleted file mode 100644 index 1f53798bb4f..00000000000 --- a/robots.txt +++ /dev/null @@ -1,2 +0,0 @@ -User-agent: * -Disallow: / diff --git a/src/archive/tar/stat_actime1.go b/src/archive/tar/stat_actime1.go index 1bdd1c9dcb2..4fdf2a04b3d 100644 --- a/src/archive/tar/stat_actime1.go +++ b/src/archive/tar/stat_actime1.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || linux || dragonfly || openbsd || solaris // +build aix linux dragonfly openbsd solaris package tar diff --git a/src/archive/tar/stat_actime2.go b/src/archive/tar/stat_actime2.go index 6f17dbe3072..5a9a35cbb4e 100644 --- a/src/archive/tar/stat_actime2.go +++ b/src/archive/tar/stat_actime2.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || freebsd || netbsd // +build darwin freebsd netbsd package tar diff --git a/src/archive/tar/stat_unix.go b/src/archive/tar/stat_unix.go index 581d87dca9d..3957349d6ef 100644 --- a/src/archive/tar/stat_unix.go +++ b/src/archive/tar/stat_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris // +build aix linux darwin dragonfly freebsd openbsd netbsd solaris package tar diff --git a/src/archive/tar/tar_test.go b/src/archive/tar/tar_test.go index 91b38401b6c..e9fafc7cc70 100644 --- a/src/archive/tar/tar_test.go +++ b/src/archive/tar/tar_test.go @@ -262,16 +262,11 @@ func TestFileInfoHeaderDir(t *testing.T) { func TestFileInfoHeaderSymlink(t *testing.T) { testenv.MustHaveSymlink(t) - tmpdir, err := os.MkdirTemp("", "TestFileInfoHeaderSymlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() link := filepath.Join(tmpdir, "link") target := tmpdir - err = os.Symlink(target, link) - if err != nil { + if err := os.Symlink(target, link); err != nil { t.Fatal(err) } fi, err := os.Lstat(link) diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go index 8b4e77875fb..808cf274cad 100644 --- a/src/archive/zip/reader.go +++ b/src/archive/zip/reader.go @@ -52,12 +52,9 @@ type File struct { FileHeader zip *Reader zipr io.ReaderAt - zipsize int64 headerOffset int64 -} - -func (f *File) hasDataDescriptor() bool { - return f.Flags&0x8 != 0 + zip64 bool // zip64 extended information extra field presence + descErr error // error reading the data descriptor during init } // OpenReader will open the Zip file specified by name and return a ReadCloser. @@ -112,7 +109,7 @@ func (z *Reader) init(r io.ReaderAt, size int64) error { // a bad one, and then only report an ErrFormat or UnexpectedEOF if // the file count modulo 65536 is incorrect. 
for { - f := &File{zip: z, zipr: r, zipsize: size} + f := &File{zip: z, zipr: r} err = readDirectoryHeader(f, buf) if err == ErrFormat || err == io.ErrUnexpectedEOF { break @@ -120,6 +117,7 @@ func (z *Reader) init(r io.ReaderAt, size int64) error { if err != nil { return err } + f.readDataDescriptor() z.File = append(z.File, f) } if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here @@ -180,26 +178,68 @@ func (f *File) Open() (io.ReadCloser, error) { return nil, ErrAlgorithm } var rc io.ReadCloser = dcomp(r) - var desr io.Reader - if f.hasDataDescriptor() { - desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen) - } rc = &checksumReader{ rc: rc, hash: crc32.NewIEEE(), f: f, - desr: desr, } return rc, nil } +// OpenRaw returns a Reader that provides access to the File's contents without +// decompression. +func (f *File) OpenRaw() (io.Reader, error) { + bodyOffset, err := f.findBodyOffset() + if err != nil { + return nil, err + } + r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, int64(f.CompressedSize64)) + return r, nil +} + +func (f *File) readDataDescriptor() { + if !f.hasDataDescriptor() { + return + } + + bodyOffset, err := f.findBodyOffset() + if err != nil { + f.descErr = err + return + } + + // In section 4.3.9.2 of the spec: "However ZIP64 format MAY be used + // regardless of the size of a file. When extracting, if the zip64 + // extended information extra field is present for the file the + // compressed and uncompressed sizes will be 8 byte values." + // + // Historically, this package has used the compressed and uncompressed + // sizes from the central directory to determine if the package is + // zip64. + // + // For this case we allow either the extra field or sizes to determine + // the data descriptor length. + zip64 := f.zip64 || f.isZip64() + n := int64(dataDescriptorLen) + if zip64 { + n = dataDescriptor64Len + } + size := int64(f.CompressedSize64) + r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, n) + dd, err := readDataDescriptor(r, zip64) + if err != nil { + f.descErr = err + return + } + f.CRC32 = dd.crc32 +} + type checksumReader struct { rc io.ReadCloser hash hash.Hash32 nread uint64 // number of bytes read so far f *File - desr io.Reader // if non-nil, where to read the data descriptor - err error // sticky error + err error // sticky error } func (r *checksumReader) Stat() (fs.FileInfo, error) { @@ -220,12 +260,12 @@ func (r *checksumReader) Read(b []byte) (n int, err error) { if r.nread != r.f.UncompressedSize64 { return 0, io.ErrUnexpectedEOF } - if r.desr != nil { - if err1 := readDataDescriptor(r.desr, r.f); err1 != nil { - if err1 == io.EOF { + if r.f.hasDataDescriptor() { + if r.f.descErr != nil { + if r.f.descErr == io.EOF { err = io.ErrUnexpectedEOF } else { - err = err1 + err = r.f.descErr } } else if r.hash.Sum32() != r.f.CRC32 { err = ErrChecksum @@ -336,6 +376,8 @@ parseExtras: switch fieldTag { case zip64ExtraID: + f.zip64 = true + // update directory values from the zip64 extra block. // They should only be consulted if the sizes read earlier // are maxed out. 
@@ -435,8 +477,9 @@ parseExtras: return nil } -func readDataDescriptor(r io.Reader, f *File) error { - var buf [dataDescriptorLen]byte +func readDataDescriptor(r io.Reader, zip64 bool) (*dataDescriptor, error) { + // Create enough space for the largest possible size + var buf [dataDescriptor64Len]byte // The spec says: "Although not originally assigned a // signature, the value 0x08074b50 has commonly been adopted @@ -446,10 +489,9 @@ func readDataDescriptor(r io.Reader, f *File) error { // descriptors and should account for either case when reading // ZIP files to ensure compatibility." // - // dataDescriptorLen includes the size of the signature but - // first read just those 4 bytes to see if it exists. + // First read just those 4 bytes to see if the signature exists. if _, err := io.ReadFull(r, buf[:4]); err != nil { - return err + return nil, err } off := 0 maybeSig := readBuf(buf[:4]) @@ -458,21 +500,28 @@ func readDataDescriptor(r io.Reader, f *File) error { // bytes. off += 4 } - if _, err := io.ReadFull(r, buf[off:12]); err != nil { - return err + + end := dataDescriptorLen - 4 + if zip64 { + end = dataDescriptor64Len - 4 } - b := readBuf(buf[:12]) - if b.uint32() != f.CRC32 { - return ErrChecksum + if _, err := io.ReadFull(r, buf[off:end]); err != nil { + return nil, err + } + b := readBuf(buf[:end]) + + out := &dataDescriptor{ + crc32: b.uint32(), } - // The two sizes that follow here can be either 32 bits or 64 bits - // but the spec is not very clear on this and different - // interpretations has been made causing incompatibilities. We - // already have the sizes from the central directory so we can - // just ignore these. - - return nil + if zip64 { + out.compressedSize = b.uint64() + out.uncompressedSize = b.uint64() + } else { + out.compressedSize = uint64(b.uint32()) + out.uncompressedSize = uint64(b.uint32()) + } + return out, nil } func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) { @@ -628,10 +677,11 @@ func (b *readBuf) sub(n int) readBuf { } // A fileListEntry is a File and its ename. -// If file == nil, the fileListEntry describes a directory, without metadata. +// If file == nil, the fileListEntry describes a directory without metadata. type fileListEntry struct { - name string - file *File // nil for directories + name string + file *File + isDir bool } type fileInfoDirEntry interface { @@ -640,20 +690,26 @@ type fileInfoDirEntry interface { } func (e *fileListEntry) stat() fileInfoDirEntry { - if e.file != nil { + if !e.isDir { return headerFileInfo{&e.file.FileHeader} } return e } // Only used for directories. 
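// (Illustrative aside, not part of the patch: regular files are handled
// by the headerFileInfo branch in stat above; only directory entries
// reach the synthesized methods that follow, and ModTime now reports the
// header's Modified time when the directory was stored explicitly.)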
-func (f *fileListEntry) Name() string { _, elem, _ := split(f.name); return elem } -func (f *fileListEntry) Size() int64 { return 0 } -func (f *fileListEntry) ModTime() time.Time { return time.Time{} } -func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 } -func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir } -func (f *fileListEntry) IsDir() bool { return true } -func (f *fileListEntry) Sys() interface{} { return nil } +func (f *fileListEntry) Name() string { _, elem, _ := split(f.name); return elem } +func (f *fileListEntry) Size() int64 { return 0 } +func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 } +func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir } +func (f *fileListEntry) IsDir() bool { return true } +func (f *fileListEntry) Sys() interface{} { return nil } + +func (f *fileListEntry) ModTime() time.Time { + if f.file == nil { + return time.Time{} + } + return f.file.FileHeader.Modified.UTC() +} func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil } @@ -664,7 +720,7 @@ func toValidName(name string) string { if strings.HasPrefix(p, "/") { p = p[len("/"):] } - for strings.HasPrefix(name, "../") { + for strings.HasPrefix(p, "../") { p = p[len("../"):] } return p @@ -673,15 +729,32 @@ func toValidName(name string) string { func (r *Reader) initFileList() { r.fileListOnce.Do(func() { dirs := make(map[string]bool) + knownDirs := make(map[string]bool) for _, file := range r.File { + isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/' name := toValidName(file.Name) for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) { dirs[dir] = true } - r.fileList = append(r.fileList, fileListEntry{name, file}) + entry := fileListEntry{ + name: name, + file: file, + isDir: isDir, + } + r.fileList = append(r.fileList, entry) + if isDir { + knownDirs[name] = true + } } for dir := range dirs { - r.fileList = append(r.fileList, fileListEntry{dir + "/", nil}) + if !knownDirs[dir] { + entry := fileListEntry{ + name: dir, + file: nil, + isDir: true, + } + r.fileList = append(r.fileList, entry) + } } sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) }) @@ -705,7 +778,7 @@ func (r *Reader) Open(name string) (fs.File, error) { if e == nil || !fs.ValidPath(name) { return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist} } - if e.file == nil || strings.HasSuffix(e.file.Name, "/") { + if e.isDir { return &openDir{e, r.openReadDir(name), 0}, nil } rc, err := e.file.Open() @@ -730,7 +803,7 @@ func split(name string) (dir, elem string, isDir bool) { return name[:i], name[i+1:], isDir } -var dotFile = &fileListEntry{name: "./"} +var dotFile = &fileListEntry{name: "./", isDir: true} func (r *Reader) openLookup(name string) *fileListEntry { if name == "." 
{ diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go index 34e96f7da43..35e681ec699 100644 --- a/src/archive/zip/reader_test.go +++ b/src/archive/zip/reader_test.go @@ -499,9 +499,15 @@ func TestReader(t *testing.T) { func readTestZip(t *testing.T, zt ZipTest) { var z *Reader var err error + var raw []byte if zt.Source != nil { rat, size := zt.Source() z, err = NewReader(rat, size) + raw = make([]byte, size) + if _, err := rat.ReadAt(raw, 0); err != nil { + t.Errorf("ReadAt error=%v", err) + return + } } else { path := filepath.Join("testdata", zt.Name) if zt.Obscured { @@ -519,6 +525,12 @@ func readTestZip(t *testing.T, zt ZipTest) { defer rc.Close() z = &rc.Reader } + var err2 error + raw, err2 = os.ReadFile(path) + if err2 != nil { + t.Errorf("ReadFile(%s) error=%v", path, err2) + return + } } if err != zt.Error { t.Errorf("error=%v, want %v", err, zt.Error) @@ -545,7 +557,7 @@ func readTestZip(t *testing.T, zt ZipTest) { // test read of each file for i, ft := range zt.File { - readTestFile(t, zt, ft, z.File[i]) + readTestFile(t, zt, ft, z.File[i], raw) } if t.Failed() { return @@ -557,7 +569,7 @@ func readTestZip(t *testing.T, zt ZipTest) { for i := 0; i < 5; i++ { for j, ft := range zt.File { go func(j int, ft ZipTestFile) { - readTestFile(t, zt, ft, z.File[j]) + readTestFile(t, zt, ft, z.File[j], raw) done <- true }(j, ft) n++ @@ -574,7 +586,7 @@ func equalTimeAndZone(t1, t2 time.Time) bool { return t1.Equal(t2) && name1 == name2 && offset1 == offset2 } -func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) { +func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File, raw []byte) { if f.Name != ft.Name { t.Errorf("name=%q, want %q", f.Name, ft.Name) } @@ -594,6 +606,31 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) { t.Errorf("%v: UncompressedSize=%#x does not match UncompressedSize64=%#x", f.Name, size, f.UncompressedSize64) } + // Check that OpenRaw returns the correct byte segment + rw, err := f.OpenRaw() + if err != nil { + t.Errorf("%v: OpenRaw error=%v", f.Name, err) + return + } + start, err := f.DataOffset() + if err != nil { + t.Errorf("%v: DataOffset error=%v", f.Name, err) + return + } + got, err := io.ReadAll(rw) + if err != nil { + t.Errorf("%v: OpenRaw ReadAll error=%v", f.Name, err) + return + } + end := uint64(start) + f.CompressedSize64 + want := raw[start:end] + if !bytes.Equal(got, want) { + t.Logf("got %q", got) + t.Logf("want %q", want) + t.Errorf("%v: OpenRaw returned unexpected bytes", f.Name) + return + } + r, err := f.Open() if err != nil { t.Errorf("%v", err) @@ -776,8 +813,8 @@ func returnRecursiveZip() (r io.ReaderAt, size int64) { // "archive/zip" // "bytes" // "io" -// "io/ioutil" // "log" +// "os" // ) // // type zeros struct{} @@ -1073,11 +1110,218 @@ func TestIssue12449(t *testing.T) { } func TestFS(t *testing.T) { - z, err := OpenReader("testdata/unix.zip") + for _, test := range []struct { + file string + want []string + }{ + { + "testdata/unix.zip", + []string{"hello", "dir/bar", "readonly"}, + }, + { + "testdata/subdir.zip", + []string{"a/b/c"}, + }, + } { + t.Run(test.file, func(t *testing.T) { + t.Parallel() + z, err := OpenReader(test.file) + if err != nil { + t.Fatal(err) + } + defer z.Close() + if err := fstest.TestFS(z, test.want...); err != nil { + t.Error(err) + } + }) + } +} + +func TestFSModTime(t *testing.T) { + t.Parallel() + z, err := OpenReader("testdata/subdir.zip") if err != nil { t.Fatal(err) } - if err := fstest.TestFS(z, "hello", "dir/bar", "dir/empty", 
"readonly"); err != nil { - t.Fatal(err) + defer z.Close() + + for _, test := range []struct { + name string + want time.Time + }{ + { + "a", + time.Date(2021, 4, 19, 12, 29, 56, 0, timeZone(-7*time.Hour)).UTC(), + }, + { + "a/b/c", + time.Date(2021, 4, 19, 12, 29, 59, 0, timeZone(-7*time.Hour)).UTC(), + }, + } { + fi, err := fs.Stat(z, test.name) + if err != nil { + t.Errorf("%s: %v", test.name, err) + continue + } + if got := fi.ModTime(); !got.Equal(test.want) { + t.Errorf("%s: got modtime %v, want %v", test.name, got, test.want) + } + } +} + +func TestCVE202127919(t *testing.T) { + // Archive containing only the file "../test.txt" + data := []byte{ + 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e, + 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78, + 0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c, + 0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51, + 0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc, + 0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed, + 0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, + 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14, + 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00, + 0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, + 0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, + 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00, + 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, + } + r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data))) + if err != nil { + t.Fatalf("Error reading the archive: %v", err) + } + _, err = r.Open("test.txt") + if err != nil { + t.Errorf("Error reading file: %v", err) + } +} + +func TestReadDataDescriptor(t *testing.T) { + tests := []struct { + desc string + in []byte + zip64 bool + want *dataDescriptor + wantErr error + }{{ + desc: "valid 32 bit with signature", + in: []byte{ + 0x50, 0x4b, 0x07, 0x08, // signature + 0x00, 0x01, 0x02, 0x03, // crc32 + 0x04, 0x05, 0x06, 0x07, // compressed size + 0x08, 0x09, 0x0a, 0x0b, // uncompressed size + }, + want: &dataDescriptor{ + crc32: 0x03020100, + compressedSize: 0x07060504, + uncompressedSize: 0x0b0a0908, + }, + }, { + desc: "valid 32 bit without signature", + in: []byte{ + 0x00, 0x01, 0x02, 0x03, // crc32 + 0x04, 0x05, 0x06, 0x07, // compressed size + 0x08, 0x09, 0x0a, 0x0b, // uncompressed size + }, + want: &dataDescriptor{ + crc32: 0x03020100, + compressedSize: 0x07060504, + uncompressedSize: 0x0b0a0908, + }, + }, { + desc: "valid 64 bit with signature", + in: []byte{ + 0x50, 0x4b, 0x07, 0x08, // signature + 0x00, 0x01, 0x02, 0x03, // crc32 + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, // uncompressed size + }, + zip64: true, + want: &dataDescriptor{ + crc32: 0x03020100, + compressedSize: 0x0b0a090807060504, + uncompressedSize: 0x131211100f0e0d0c, + }, + }, { + desc: "valid 64 bit without signature", + in: []byte{ + 0x00, 0x01, 0x02, 0x03, // crc32 + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, // uncompressed size + }, + zip64: true, + want: &dataDescriptor{ + crc32: 0x03020100, + compressedSize: 0x0b0a090807060504, + uncompressedSize: 
0x131211100f0e0d0c, + }, + }, { + desc: "invalid 32 bit with signature", + in: []byte{ + 0x50, 0x4b, 0x07, 0x08, // signature + 0x00, 0x01, 0x02, 0x03, // crc32 + 0x04, 0x05, // unexpected end + }, + wantErr: io.ErrUnexpectedEOF, + }, { + desc: "invalid 32 bit without signature", + in: []byte{ + 0x00, 0x01, 0x02, 0x03, // crc32 + 0x04, 0x05, // unexpected end + }, + wantErr: io.ErrUnexpectedEOF, + }, { + desc: "invalid 64 bit with signature", + in: []byte{ + 0x50, 0x4b, 0x07, 0x08, // signature + 0x00, 0x01, 0x02, 0x03, // crc32 + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, // unexpected end + }, + zip64: true, + wantErr: io.ErrUnexpectedEOF, + }, { + desc: "invalid 64 bit without signature", + in: []byte{ + 0x00, 0x01, 0x02, 0x03, // crc32 + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, // unexpected end + }, + zip64: true, + wantErr: io.ErrUnexpectedEOF, + }} + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + r := bytes.NewReader(test.in) + + desc, err := readDataDescriptor(r, test.zip64) + if err != test.wantErr { + t.Fatalf("got err %v; want nil", err) + } + if test.want == nil { + return + } + if desc == nil { + t.Fatalf("got nil DataDescriptor; want non-nil") + } + if desc.crc32 != test.want.crc32 { + t.Errorf("got CRC32 %#x; want %#x", desc.crc32, test.want.crc32) + } + if desc.compressedSize != test.want.compressedSize { + t.Errorf("got CompressedSize %#x; want %#x", desc.compressedSize, test.want.compressedSize) + } + if desc.uncompressedSize != test.want.uncompressedSize { + t.Errorf("got UncompressedSize %#x; want %#x", desc.uncompressedSize, test.want.uncompressedSize) + } + }) } } diff --git a/src/archive/zip/struct.go b/src/archive/zip/struct.go index 4dd29f35fa6..ff9f605eb69 100644 --- a/src/archive/zip/struct.go +++ b/src/archive/zip/struct.go @@ -42,7 +42,7 @@ const ( directoryHeaderLen = 46 // + filename + extra + comment directoryEndLen = 22 // + comment dataDescriptorLen = 16 // four uint32: descriptor signature, crc32, compressed size, size - dataDescriptor64Len = 24 // descriptor with 8 byte sizes + dataDescriptor64Len = 24 // two uint32: signature, crc32 | two uint64: compressed size, size directory64LocLen = 20 // directory64EndLen = 56 // + extra @@ -315,6 +315,10 @@ func (h *FileHeader) isZip64() bool { return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max } +func (f *FileHeader) hasDataDescriptor() bool { + return f.Flags&0x8 != 0 +} + func msdosModeToFileMode(m uint32) (mode fs.FileMode) { if m&msdosDir != 0 { mode = fs.ModeDir | 0777 @@ -341,11 +345,9 @@ func fileModeToUnixMode(mode fs.FileMode) uint32 { case fs.ModeSocket: m = s_IFSOCK case fs.ModeDevice: - if mode&fs.ModeCharDevice != 0 { - m = s_IFCHR - } else { - m = s_IFBLK - } + m = s_IFBLK + case fs.ModeDevice | fs.ModeCharDevice: + m = s_IFCHR } if mode&fs.ModeSetuid != 0 { m |= s_ISUID @@ -388,3 +390,11 @@ func unixModeToFileMode(m uint32) fs.FileMode { } return mode } + +// dataDescriptor holds the data descriptor that optionally follows the file +// contents in the zip file. 
+type dataDescriptor struct { + crc32 uint32 + compressedSize uint64 + uncompressedSize uint64 +} diff --git a/src/archive/zip/testdata/subdir.zip b/src/archive/zip/testdata/subdir.zip new file mode 100644 index 00000000000..324d06b48d1 Binary files /dev/null and b/src/archive/zip/testdata/subdir.zip differ diff --git a/src/archive/zip/writer.go b/src/archive/zip/writer.go index cdc534eaf01..3b23cc3391d 100644 --- a/src/archive/zip/writer.go +++ b/src/archive/zip/writer.go @@ -37,6 +37,7 @@ type Writer struct { type header struct { *FileHeader offset uint64 + raw bool } // NewWriter returns a new Writer writing a zip file to w. @@ -245,22 +246,31 @@ func detectUTF8(s string) (valid, require bool) { return true, require } +// prepare performs the bookkeeping operations required at the start of +// CreateHeader and CreateRaw. +func (w *Writer) prepare(fh *FileHeader) error { + if w.last != nil && !w.last.closed { + if err := w.last.close(); err != nil { + return err + } + } + if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh { + // See https://golang.org/issue/11144 confusion. + return errors.New("archive/zip: invalid duplicate FileHeader") + } + return nil +} + // CreateHeader adds a file to the zip archive using the provided FileHeader // for the file metadata. Writer takes ownership of fh and may mutate // its fields. The caller must not modify fh after calling CreateHeader. // // This returns a Writer to which the file contents should be written. // The file's contents must be written to the io.Writer before the next -// call to Create, CreateHeader, or Close. +// call to Create, CreateHeader, CreateRaw, or Close. func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { - if w.last != nil && !w.last.closed { - if err := w.last.close(); err != nil { - return nil, err - } - } - if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh { - // See https://golang.org/issue/11144 confusion. - return nil, errors.New("archive/zip: invalid duplicate FileHeader") + if err := w.prepare(fh); err != nil { + return nil, err } // The ZIP format has a sad state of affairs regarding character encoding. @@ -365,7 +375,7 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { ow = fw } w.dir = append(w.dir, h) - if err := writeHeader(w.cw, fh); err != nil { + if err := writeHeader(w.cw, h); err != nil { return nil, err } // If we're creating a directory, fw is nil. @@ -373,7 +383,7 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { return ow, nil } -func writeHeader(w io.Writer, h *FileHeader) error { +func writeHeader(w io.Writer, h *header) error { const maxUint16 = 1<<16 - 1 if len(h.Name) > maxUint16 { return errLongName @@ -390,9 +400,20 @@ func writeHeader(w io.Writer, h *FileHeader) error { b.uint16(h.Method) b.uint16(h.ModifiedTime) b.uint16(h.ModifiedDate) - b.uint32(0) // since we are writing a data descriptor crc32, - b.uint32(0) // compressed size, - b.uint32(0) // and uncompressed size should be zero + // In raw mode (caller does the compression), the values are either + // written here or in the trailing data descriptor based on the header + // flags. + if h.raw && !h.hasDataDescriptor() { + b.uint32(h.CRC32) + b.uint32(uint32(min64(h.CompressedSize64, uint32max))) + b.uint32(uint32(min64(h.UncompressedSize64, uint32max))) + } else { + // When this package handles the compression, these values are + // always written to the trailing data descriptor.
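// (Illustrative aside, not part of the patch: a streaming reader that
// cannot seek sees these zeroed local-header fields and must recover the
// CRC-32 and sizes from the trailing data descriptor; a seeking reader
// instead trusts the central directory, which always holds the real values.)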
+ b.uint32(0) // crc32 + b.uint32(0) // compressed size + b.uint32(0) // uncompressed size + } b.uint16(uint16(len(h.Name))) b.uint16(uint16(len(h.Extra))) if _, err := w.Write(buf[:]); err != nil { @@ -405,6 +426,65 @@ func writeHeader(w io.Writer, h *FileHeader) error { return err } +func min64(x, y uint64) uint64 { + if x < y { + return x + } + return y +} + +// CreateRaw adds a file to the zip archive using the provided FileHeader and +// returns a Writer to which the file contents should be written. The file's +// contents must be written to the io.Writer before the next call to Create, +// CreateHeader, CreateRaw, or Close. +// +// In contrast to CreateHeader, the bytes passed to Writer are not compressed. +func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) { + if err := w.prepare(fh); err != nil { + return nil, err + } + + fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max)) + fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max)) + + h := &header{ + FileHeader: fh, + offset: uint64(w.cw.count), + raw: true, + } + w.dir = append(w.dir, h) + if err := writeHeader(w.cw, h); err != nil { + return nil, err + } + + if strings.HasSuffix(fh.Name, "/") { + w.last = nil + return dirWriter{}, nil + } + + fw := &fileWriter{ + header: h, + zipw: w.cw, + } + w.last = fw + return fw, nil +} + +// Copy copies the file f (obtained from a Reader) into w. It copies the raw +// form directly bypassing decompression, compression, and validation. +func (w *Writer) Copy(f *File) error { + r, err := f.OpenRaw() + if err != nil { + return err + } + fw, err := w.CreateRaw(&f.FileHeader) + if err != nil { + return err + } + _, err = io.Copy(fw, r) + return err +} + // RegisterCompressor registers or overrides a custom compressor for a specific // method ID. If a compressor for a given method is not found, Writer will // default to looking up the compressor at the package level. @@ -446,6 +526,9 @@ func (w *fileWriter) Write(p []byte) (int, error) { if w.closed { return 0, errors.New("zip: write to closed file") } + if w.raw { + return w.zipw.Write(p) + } w.crc32.Write(p) return w.rawCount.Write(p) } @@ -455,6 +538,9 @@ func (w *fileWriter) close() error { return errors.New("zip: file closed twice") } w.closed = true + if w.raw { + return w.writeDataDescriptor() + } if err := w.comp.Close(); err != nil { return err } @@ -474,26 +560,33 @@ func (w *fileWriter) close() error { fh.UncompressedSize = uint32(fh.UncompressedSize64) } + return w.writeDataDescriptor() +} + +func (w *fileWriter) writeDataDescriptor() error { + if !w.hasDataDescriptor() { + return nil + } // Write data descriptor. This is more complicated than one would // think, see e.g. comments in zipfile.c:putextended() and // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588. // The approach here is to write 8 byte sizes if needed without // adding a zip64 extra in the local header (too late anyway). 
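// (Illustrative layout, not part of the patch; it assumes the de-facto
// signature is written, as it is below:
//	bytes 0-3: uint32 signature 0x08074b50
//	bytes 4-7: uint32 CRC-32
//	then two uint32 sizes (16 bytes total, dataDescriptorLen)
//	or two uint64 sizes (24 bytes total, dataDescriptor64Len).)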
var buf []byte - if fh.isZip64() { + if w.isZip64() { buf = make([]byte, dataDescriptor64Len) } else { buf = make([]byte, dataDescriptorLen) } b := writeBuf(buf) b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X - b.uint32(fh.CRC32) - if fh.isZip64() { - b.uint64(fh.CompressedSize64) - b.uint64(fh.UncompressedSize64) + b.uint32(w.CRC32) + if w.isZip64() { + b.uint64(w.CompressedSize64) + b.uint64(w.UncompressedSize64) } else { - b.uint32(fh.CompressedSize) - b.uint32(fh.UncompressedSize) + b.uint32(w.CompressedSize) + b.uint32(w.UncompressedSize) } _, err := w.zipw.Write(buf) return err diff --git a/src/archive/zip/writer_test.go b/src/archive/zip/writer_test.go index 5985144e5c2..97c6c529799 100644 --- a/src/archive/zip/writer_test.go +++ b/src/archive/zip/writer_test.go @@ -6,8 +6,10 @@ package zip import ( "bytes" + "compress/flate" "encoding/binary" "fmt" + "hash/crc32" "io" "io/fs" "math/rand" @@ -57,6 +59,18 @@ var writeTests = []WriteTest{ Method: Deflate, Mode: 0755 | fs.ModeSymlink, }, + { + Name: "device", + Data: []byte("device file"), + Method: Deflate, + Mode: 0755 | fs.ModeDevice, + }, + { + Name: "chardevice", + Data: []byte("char device file"), + Method: Deflate, + Mode: 0755 | fs.ModeDevice | fs.ModeCharDevice, + }, } func TestWriter(t *testing.T) { @@ -353,6 +367,171 @@ func TestWriterDirAttributes(t *testing.T) { } } +func TestWriterCopy(t *testing.T) { + // make a zip file + buf := new(bytes.Buffer) + w := NewWriter(buf) + for _, wt := range writeTests { + testCreate(t, w, &wt) + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read it back + src, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + if err != nil { + t.Fatal(err) + } + for i, wt := range writeTests { + testReadFile(t, src.File[i], &wt) + } + + // make a new zip file copying the old compressed data. 
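// (Illustrative aside, not part of the patch: Writer.Copy pairs
// f.OpenRaw() with dst.CreateRaw(&f.FileHeader) and an io.Copy, so the
// stored bytes below move verbatim, with no decompression, recompression,
// or checksum re-validation on the way through.)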
+ buf2 := new(bytes.Buffer) + dst := NewWriter(buf2) + for _, f := range src.File { + if err := dst.Copy(f); err != nil { + t.Fatal(err) + } + } + if err := dst.Close(); err != nil { + t.Fatal(err) + } + + // read the new one back + r, err := NewReader(bytes.NewReader(buf2.Bytes()), int64(buf2.Len())) + if err != nil { + t.Fatal(err) + } + for i, wt := range writeTests { + testReadFile(t, r.File[i], &wt) + } +} + +func TestWriterCreateRaw(t *testing.T) { + files := []struct { + name string + content []byte + method uint16 + flags uint16 + crc32 uint32 + uncompressedSize uint64 + compressedSize uint64 + }{ + { + name: "small store w desc", + content: []byte("gophers"), + method: Store, + flags: 0x8, + }, + { + name: "small deflate wo desc", + content: bytes.Repeat([]byte("abcdefg"), 2048), + method: Deflate, + }, + } + + // write a zip file + archive := new(bytes.Buffer) + w := NewWriter(archive) + + for i := range files { + f := &files[i] + f.crc32 = crc32.ChecksumIEEE(f.content) + size := uint64(len(f.content)) + f.uncompressedSize = size + f.compressedSize = size + + var compressedContent []byte + if f.method == Deflate { + var buf bytes.Buffer + w, err := flate.NewWriter(&buf, flate.BestSpeed) + if err != nil { + t.Fatalf("flate.NewWriter err = %v", err) + } + _, err = w.Write(f.content) + if err != nil { + t.Fatalf("flate Write err = %v", err) + } + err = w.Close() + if err != nil { + t.Fatalf("flate Writer.Close err = %v", err) + } + compressedContent = buf.Bytes() + f.compressedSize = uint64(len(compressedContent)) + } + + h := &FileHeader{ + Name: f.name, + Method: f.method, + Flags: f.flags, + CRC32: f.crc32, + CompressedSize64: f.compressedSize, + UncompressedSize64: f.uncompressedSize, + } + w, err := w.CreateRaw(h) + if err != nil { + t.Fatal(err) + } + if compressedContent != nil { + _, err = w.Write(compressedContent) + } else { + _, err = w.Write(f.content) + } + if err != nil { + t.Fatalf("%s Write got %v; want nil", f.name, err) + } + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read it back + r, err := NewReader(bytes.NewReader(archive.Bytes()), int64(archive.Len())) + if err != nil { + t.Fatal(err) + } + for i, want := range files { + got := r.File[i] + if got.Name != want.name { + t.Errorf("got Name %s; want %s", got.Name, want.name) + } + if got.Method != want.method { + t.Errorf("%s: got Method %#x; want %#x", want.name, got.Method, want.method) + } + if got.Flags != want.flags { + t.Errorf("%s: got Flags %#x; want %#x", want.name, got.Flags, want.flags) + } + if got.CRC32 != want.crc32 { + t.Errorf("%s: got CRC32 %#x; want %#x", want.name, got.CRC32, want.crc32) + } + if got.CompressedSize64 != want.compressedSize { + t.Errorf("%s: got CompressedSize64 %d; want %d", want.name, got.CompressedSize64, want.compressedSize) + } + if got.UncompressedSize64 != want.uncompressedSize { + t.Errorf("%s: got UncompressedSize64 %d; want %d", want.name, got.UncompressedSize64, want.uncompressedSize) + } + + r, err := got.Open() + if err != nil { + t.Errorf("%s: Open err = %v", got.Name, err) + continue + } + + buf, err := io.ReadAll(r) + if err != nil { + t.Errorf("%s: ReadAll err = %v", got.Name, err) + continue + } + + if !bytes.Equal(buf, want.content) { + t.Errorf("%v: ReadAll returned unexpected bytes", got.Name) + } + } +} + func testCreate(t *testing.T, w *Writer, wt *WriteTest) { header := &FileHeader{ Name: wt.Name, @@ -378,15 +557,15 @@ func testReadFile(t *testing.T, f *File, wt *WriteTest) { testFileMode(t, f, wt.Mode) rc, err := f.Open() if err != 
nil { - t.Fatal("opening:", err) + t.Fatalf("opening %s: %v", f.Name, err) } b, err := io.ReadAll(rc) if err != nil { - t.Fatal("reading:", err) + t.Fatalf("reading %s: %v", f.Name, err) } err = rc.Close() if err != nil { - t.Fatal("closing:", err) + t.Fatalf("closing %s: %v", f.Name, err) } if !bytes.Equal(b, wt.Data) { t.Errorf("File contents %q, want %q", b, wt.Data) diff --git a/src/bufio/bufio.go b/src/bufio/bufio.go index 6baf9b9e400..ec928e7ad69 100644 --- a/src/bufio/bufio.go +++ b/src/bufio/bufio.go @@ -670,7 +670,8 @@ func (b *Writer) WriteByte(c byte) error { // WriteRune writes a single Unicode code point, returning // the number of bytes written and any error. func (b *Writer) WriteRune(r rune) (size int, err error) { - if r < utf8.RuneSelf { + // Compare as uint32 to correctly handle negative runes. + if uint32(r) < utf8.RuneSelf { err = b.WriteByte(byte(r)) if err != nil { return 0, err diff --git a/src/bufio/bufio_test.go b/src/bufio/bufio_test.go index d7b34bd0d8f..ebcc711db9d 100644 --- a/src/bufio/bufio_test.go +++ b/src/bufio/bufio_test.go @@ -534,6 +534,20 @@ func TestReadWriteRune(t *testing.T) { } } +func TestWriteInvalidRune(t *testing.T) { + // Invalid runes, including negative ones, should be written as the + // replacement character. + for _, r := range []rune{-1, utf8.MaxRune + 1} { + var buf bytes.Buffer + w := NewWriter(&buf) + w.WriteRune(r) + w.Flush() + if s := buf.String(); s != "\uFFFD" { + t.Errorf("WriteRune(%d) wrote %q, not replacement character", r, s) + } + } +} + func TestReadStringAllocs(t *testing.T) { r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n") buf := NewReader(r) diff --git a/src/bufio/scan.go b/src/bufio/scan.go index af46a14fbbe..4846d4f7336 100644 --- a/src/bufio/scan.go +++ b/src/bufio/scan.go @@ -48,7 +48,8 @@ type Scanner struct { // and the next token to return to the user, if any, plus an error, if any. // // Scanning stops if the function returns an error, in which case some of -// the input may be discarded. +// the input may be discarded. If that error is ErrFinalToken, scanning +// stops with no error. // // Otherwise, the Scanner advances the input. If the token is not nil, // the Scanner returns it to the user. If the token is nil, the diff --git a/src/bytes/boundary_test.go b/src/bytes/boundary_test.go index ea84f1e40fd..5a47526593b 100644 --- a/src/bytes/boundary_test.go +++ b/src/bytes/boundary_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +//go:build linux // +build linux package bytes_test diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go index f19a4cfff09..549b077708f 100644 --- a/src/bytes/buffer.go +++ b/src/bytes/buffer.go @@ -275,7 +275,8 @@ func (b *Buffer) WriteByte(c byte) error { // included to match bufio.Writer's WriteRune. The buffer is grown as needed; // if it becomes too large, WriteRune will panic with ErrTooLarge. func (b *Buffer) WriteRune(r rune) (n int, err error) { - if r < utf8.RuneSelf { + // Compare as uint32 to correctly handle negative runes. + if uint32(r) < utf8.RuneSelf { b.WriteByte(byte(r)) return 1, nil } diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go index fec5ef8a35f..9c9b7440ffa 100644 --- a/src/bytes/buffer_test.go +++ b/src/bytes/buffer_test.go @@ -6,6 +6,7 @@ package bytes_test import ( . 
"bytes" + "fmt" "io" "math/rand" "testing" @@ -387,6 +388,16 @@ func TestRuneIO(t *testing.T) { } } +func TestWriteInvalidRune(t *testing.T) { + // Invalid runes, including negative ones, should be written as + // utf8.RuneError. + for _, r := range []rune{-1, utf8.MaxRune + 1} { + var buf Buffer + buf.WriteRune(r) + check(t, fmt.Sprintf("TestWriteInvalidRune (%d)", r), &buf, "\uFFFD") + } +} + func TestNext(t *testing.T) { b := []byte{0, 1, 2, 3, 4} tmp := make([]byte, 5) diff --git a/src/cmd/api/goapi.go b/src/cmd/api/goapi.go index efc2696f8fe..b07a238d679 100644 --- a/src/cmd/api/goapi.go +++ b/src/cmd/api/goapi.go @@ -215,8 +215,7 @@ func main() { } optional := fileFeatures(*nextFile) exception := fileFeatures(*exceptFile) - fail = !compareAPI(bw, features, required, optional, exception, - *allowNew && strings.Contains(runtime.Version(), "devel")) + fail = !compareAPI(bw, features, required, optional, exception, *allowNew) } // export emits the exported package features. diff --git a/src/cmd/api/run.go b/src/cmd/api/run.go index ecb1d0f81aa..81979de191a 100644 --- a/src/cmd/api/run.go +++ b/src/cmd/api/run.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore // The run program is invoked via the dist tool. @@ -9,8 +10,11 @@ package main import ( + "errors" "fmt" exec "internal/execabs" + "internal/goversion" + "io/fs" "log" "os" "path/filepath" @@ -42,6 +46,7 @@ func main() { apiDir := filepath.Join(goroot, "api") out, err := exec.Command(goCmd(), "tool", "api", "-c", findAPIDirFiles(apiDir), + allowNew(apiDir), "-next", filepath.Join(apiDir, "next.txt"), "-except", filepath.Join(apiDir, "except.txt")).CombinedOutput() if err != nil { @@ -70,3 +75,35 @@ func findAPIDirFiles(apiDir string) string { } return strings.Join(apiFiles, ",") } + +// allowNew returns the -allow_new flag to use for the 'go tool api' invocation. +func allowNew(apiDir string) string { + // Verify that the api/go1.n.txt for previous Go version exists. + // It definitely should, otherwise it's a signal that the logic below may be outdated. + if _, err := os.Stat(filepath.Join(apiDir, fmt.Sprintf("go1.%d.txt", goversion.Version-1))); err != nil { + log.Fatalln("Problem with api file for previous release:", err) + } + + // See whether the api/go1.n.txt for this Go version has been created. + // (As of April 2021, it gets created during the release of the first Beta.) + _, err := os.Stat(filepath.Join(apiDir, fmt.Sprintf("go1.%d.txt", goversion.Version))) + if errors.Is(err, fs.ErrNotExist) { + // It doesn't exist, so we're in development or before Beta 1. + // At this stage, unmentioned API additions are deemed okay. + // (They will be quietly shown in API check output, but the test won't fail). + return "-allow_new=true" + } else if err == nil { + // The api/go1.n.txt for this Go version has been created, + // so we're definitely past Beta 1 in the release cycle. + // + // From this point, enforce that api/go1.n.txt is an accurate and complete + // representation of what's going into the release by failing API check if + // there are API additions (a month into the freeze, there shouldn't be many). + // + // See golang.org/issue/43956. 
+ return "-allow_new=false" + } else { + log.Fatal(err) + } + panic("unreachable") +} diff --git a/src/cmd/asm/internal/arch/arm64.go b/src/cmd/asm/internal/arch/arm64.go index e557630ca60..40d828a1fea 100644 --- a/src/cmd/asm/internal/arch/arm64.go +++ b/src/cmd/asm/internal/arch/arm64.go @@ -147,7 +147,16 @@ func arm64RegisterNumber(name string, n int16) (int16, bool) { return 0, false } -// ARM64RegisterExtension parses an ARM64 register with extension or arrangement. +// ARM64RegisterShift constructs an ARM64 register with shift operation. +func ARM64RegisterShift(reg, op, count int16) (int64, error) { + // the base register of shift operations must be general register. + if reg > arm64.REG_R31 || reg < arm64.REG_R0 { + return 0, errors.New("invalid register for shift operation") + } + return int64(reg&31)<<16 | int64(op)<<22 | int64(uint16(count)), nil +} + +// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement. func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { Rnum := (reg & 31) + int16(num<<5) if isAmount { @@ -155,154 +164,163 @@ func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, i return errors.New("index shift amount is out of range") } } - switch ext { - case "UXTB": - if !isAmount { - return errors.New("invalid register extension") - } - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTB + Rnum - case "UXTH": - if !isAmount { - return errors.New("invalid register extension") - } - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTH + Rnum - case "UXTW": - if !isAmount { - return errors.New("invalid register extension") - } - // effective address of memory is a base register value and an offset register value. 
- if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_UXTW + Rnum - } else { - a.Reg = arm64.REG_UXTW + Rnum - } - case "UXTX": - if !isAmount { - return errors.New("invalid register extension") - } - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTX + Rnum - case "SXTB": - if !isAmount { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_SXTB + Rnum - case "SXTH": - if !isAmount { - return errors.New("invalid register extension") - } - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_SXTH + Rnum - case "SXTW": - if !isAmount { - return errors.New("invalid register extension") - } - if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_SXTW + Rnum - } else { - a.Reg = arm64.REG_SXTW + Rnum - } - case "SXTX": - if !isAmount { - return errors.New("invalid register extension") - } - if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_SXTX + Rnum - } else { - a.Reg = arm64.REG_SXTX + Rnum - } - case "LSL": - if !isAmount { - return errors.New("invalid register extension") - } - a.Index = arm64.REG_LSL + Rnum - case "B8": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5) - case "B16": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5) - case "H4": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5) - case "H8": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5) - case "S2": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5) - case "S4": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5) - case "D1": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5) - case "D2": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5) - case "Q1": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5) - case "B": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5) - a.Index = num - case "H": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5) - a.Index = num - case "S": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5) - a.Index = num - case "D": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5) - a.Index = num - default: - return errors.New("unsupported register extension type: " + ext) - } + if reg <= arm64.REG_R31 && reg >= arm64.REG_R0 { + switch ext { + case "UXTB": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTB + Rnum + case "UXTH": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM 
{ + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTH + Rnum + case "UXTW": + if !isAmount { + return errors.New("invalid register extension") + } + // effective address of memory is a base register value and an offset register value. + if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_UXTW + Rnum + } else { + a.Reg = arm64.REG_UXTW + Rnum + } + case "UXTX": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTX + Rnum + case "SXTB": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_SXTB + Rnum + case "SXTH": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_SXTH + Rnum + case "SXTW": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_SXTW + Rnum + } else { + a.Reg = arm64.REG_SXTW + Rnum + } + case "SXTX": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_SXTX + Rnum + } else { + a.Reg = arm64.REG_SXTX + Rnum + } + case "LSL": + if !isAmount { + return errors.New("invalid register extension") + } + a.Index = arm64.REG_LSL + Rnum + default: + return errors.New("unsupported general register extension type: " + ext) + } + } else if reg <= arm64.REG_V31 && reg >= arm64.REG_V0 { + switch ext { + case "B8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5) + case "B16": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5) + case "H4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5) + case "H8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5) + case "S2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5) + case "S4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5) + case "D1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5) + case "D2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5) + case "Q1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5) + case "B": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5) + a.Index = num + case "H": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5) + a.Index = num + case "S": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5) + a.Index = num + case "D": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5) + a.Index = num + default: + return 
errors.New("unsupported simd register extension type: " + ext) + } + } else { + return errors.New("invalid register and extension combination") + } return nil } -// ARM64RegisterArrangement parses an ARM64 vector register arrangement. +// ARM64RegisterArrangement constructs an ARM64 vector register arrangement. func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) { var curQ, curSize uint16 if name[0] != 'V' { diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go index c4032759bb5..cf0d1550f99 100644 --- a/src/cmd/asm/internal/asm/asm.go +++ b/src/cmd/asm/internal/asm/asm.go @@ -134,6 +134,14 @@ func (p *Parser) asmText(operands [][]lex.Token) { next++ } + // Issue an error if we see a function defined as ABIInternal + // without NOSPLIT. In ABIInternal, obj needs to know the function + // signature in order to construct the morestack path, so this + // currently isn't supported for asm functions. + if nameAddr.Sym.ABI() == obj.ABIInternal && flag&obj.NOSPLIT == 0 { + p.errorf("TEXT %q: ABIInternal requires NOSPLIT", name) + } + // Next operand is the frame and arg size. // Bizarre syntax: $frameSize-argSize is two words, not subtraction. // Both frameSize and argSize must be simple integers; only frameSize @@ -799,22 +807,11 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { p.errorf("can't handle %s instruction with 4 operands", op) return case 5: - if p.arch.Family == sys.PPC64 && arch.IsPPC64RLD(op) { - // Always reg, reg, con, con, reg. (con, con is a 'mask'). + if p.arch.Family == sys.PPC64 { prog.From = a[0] + // Second arg is always a register type on ppc64. prog.Reg = p.getRegister(prog, op, &a[1]) - mask1 := p.getConstant(prog, op, &a[2]) - mask2 := p.getConstant(prog, op, &a[3]) - var mask uint32 - if mask1 < mask2 { - mask = (^uint32(0) >> uint(mask1)) & (^uint32(0) << uint(31-mask2)) - } else { - mask = (^uint32(0) >> uint(mask2+1)) & (^uint32(0) << uint(31-(mask1-1))) - } - prog.SetFrom3(obj.Addr{ - Type: obj.TYPE_CONST, - Offset: int64(mask), - }) + prog.SetRestArgs([]obj.Addr{a[2], a[3]}) prog.To = a[4] break } diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go index a4153f3af12..ead8b27b015 100644 --- a/src/cmd/asm/internal/asm/endtoend_test.go +++ b/src/cmd/asm/internal/asm/endtoend_test.go @@ -8,6 +8,7 @@ import ( "bufio" "bytes" "fmt" + "internal/buildcfg" "io/ioutil" "os" "path/filepath" @@ -19,7 +20,6 @@ import ( "cmd/asm/internal/lex" "cmd/internal/obj" - "cmd/internal/objabi" ) // An end-to-end test for the assembler: Do we print what we parse? 
@@ -270,7 +270,7 @@ var ( errQuotesRE = regexp.MustCompile(`"([^"]*)"`) ) -func testErrors(t *testing.T, goarch, file string) { +func testErrors(t *testing.T, goarch, file string, flags ...string) { input := filepath.Join("testdata", file+".s") architecture, ctxt := setArch(goarch) lexer := lex.NewLexer(input) @@ -292,6 +292,14 @@ func testErrors(t *testing.T, goarch, file string) { } errBuf.WriteString(s) } + for _, flag := range flags { + switch flag { + case "dynlink": + ctxt.Flag_dynlink = true + default: + t.Errorf("unknown flag %s", flag) + } + } pList.Firstpc, ok = parser.Parse() obj.Flushplist(ctxt, pList, nil, "") if ok && !failed { @@ -360,10 +368,10 @@ func Test386EndToEnd(t *testing.T) { } func TestARMEndToEnd(t *testing.T) { - defer func(old int) { objabi.GOARM = old }(objabi.GOARM) + defer func(old int) { buildcfg.GOARM = old }(buildcfg.GOARM) for _, goarm := range []int{5, 6, 7} { t.Logf("GOARM=%d", goarm) - objabi.GOARM = goarm + buildcfg.GOARM = goarm testEndToEnd(t, "arm", "arm") if goarm == 6 { testEndToEnd(t, "arm", "armv6") @@ -430,6 +438,10 @@ func TestAMD64Errors(t *testing.T) { testErrors(t, "amd64", "amd64error") } +func TestAMD64DynLinkErrors(t *testing.T) { + testErrors(t, "amd64", "amd64dynlinkerror", "dynlink") +} + func TestMIPSEndToEnd(t *testing.T) { testEndToEnd(t, "mips", "mips") testEndToEnd(t, "mips64", "mips64") @@ -439,8 +451,12 @@ func TestPPC64EndToEnd(t *testing.T) { testEndToEnd(t, "ppc64", "ppc64") } -func TestRISCVEncoder(t *testing.T) { - testEndToEnd(t, "riscv64", "riscvenc") +func TestRISCVEndToEnd(t *testing.T) { + testEndToEnd(t, "riscv64", "riscv64") +} + +func TestRISCVErrors(t *testing.T) { + testErrors(t, "riscv64", "riscv64error") } func TestS390XEndToEnd(t *testing.T) { diff --git a/src/cmd/asm/internal/asm/operand_test.go b/src/cmd/asm/internal/asm/operand_test.go index c6def15e20e..8ef02b1a0e8 100644 --- a/src/cmd/asm/internal/asm/operand_test.go +++ b/src/cmd/asm/internal/asm/operand_test.go @@ -5,20 +5,20 @@ package asm import ( + "internal/buildcfg" "strings" "testing" "cmd/asm/internal/arch" "cmd/asm/internal/lex" "cmd/internal/obj" - "cmd/internal/objabi" ) // A simple in-out test: Do we print what we parse? func setArch(goarch string) (*arch.Arch, *obj.Link) { - objabi.GOOS = "linux" // obj can handle this OS for all architectures. - objabi.GOARCH = goarch + buildcfg.GOOS = "linux" // obj can handle this OS for all architectures. 
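// (Illustrative aside, not part of the patch: these target parameters
// now come from the new internal/buildcfg package rather than
// cmd/internal/objabi, matching the import change above.)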
+ buildcfg.GOARCH = goarch architecture := arch.Set(goarch) if architecture == nil { panic("asm: unrecognized architecture " + goarch) diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go index f1d37bc2c8d..ab48632a44b 100644 --- a/src/cmd/asm/internal/asm/parse.go +++ b/src/cmd/asm/internal/asm/parse.go @@ -689,7 +689,11 @@ func (p *Parser) registerShift(name string, prefix rune) int64 { p.errorf("unexpected %s in register shift", tok.String()) } if p.arch.Family == sys.ARM64 { - return int64(r1&31)<<16 | int64(op)<<22 | int64(uint16(count)) + off, err := arch.ARM64RegisterShift(r1, op, count) + if err != nil { + p.errorf(err.Error()) + } + return off } else { return int64((r1 & 15) | op<<5 | count) } @@ -999,15 +1003,17 @@ func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) { p.errorf("unimplemented two-register form") } a.Index = r1 - if scale == 0 && p.arch.Family == sys.ARM64 { - // scale is 1 by default for ARM64 - a.Scale = 1 + if scale != 0 && p.arch.Family == sys.ARM64 { + p.errorf("arm64 doesn't support scaled register format") } else { a.Scale = int16(scale) } } p.get(')') } else if scale != 0 { + if p.arch.Family == sys.ARM64 { + p.errorf("arm64 doesn't support scaled register format") + } // First (R) was missing, all we have is (R*scale). a.Reg = 0 a.Index = r1 diff --git a/src/cmd/asm/internal/asm/pseudo_test.go b/src/cmd/asm/internal/asm/pseudo_test.go index 622ee25ce71..fe6ffa60740 100644 --- a/src/cmd/asm/internal/asm/pseudo_test.go +++ b/src/cmd/asm/internal/asm/pseudo_test.go @@ -25,11 +25,13 @@ func tokenize(s string) [][]lex.Token { func TestErroneous(t *testing.T) { - tests := []struct { + type errtest struct { pseudo string operands string expected string - }{ + } + + nonRuntimeTests := []errtest{ {"TEXT", "", "expect two or three operands for TEXT"}, {"TEXT", "%", "expect two or three operands for TEXT"}, {"TEXT", "1, 1", "TEXT symbol \"\" must be a symbol(SB)"}, @@ -58,23 +60,44 @@ func TestErroneous(t *testing.T) { {"PCDATA", "1", "expect two operands for PCDATA"}, } + runtimeTests := []errtest{ + {"TEXT", "foo(SB),0", "TEXT \"foo\": ABIInternal requires NOSPLIT"}, + } + + testcats := []struct { + compilingRuntime bool + tests []errtest + }{ + { + compilingRuntime: false, + tests: nonRuntimeTests, + }, + { + compilingRuntime: true, + tests: runtimeTests, + }, + } + // Note these errors should be independent of the architecture. // Just run the test with amd64. 
parser := newParser("amd64") var buf bytes.Buffer parser.errorWriter = &buf - for _, test := range tests { - parser.errorCount = 0 - parser.lineNum++ - if !parser.pseudo(test.pseudo, tokenize(test.operands)) { - t.Fatalf("Wrong pseudo-instruction: %s", test.pseudo) + for _, cat := range testcats { + for _, test := range cat.tests { + parser.compilingRuntime = cat.compilingRuntime + parser.errorCount = 0 + parser.lineNum++ + if !parser.pseudo(test.pseudo, tokenize(test.operands)) { + t.Fatalf("Wrong pseudo-instruction: %s", test.pseudo) + } + errorLine := buf.String() + if test.expected != errorLine { + t.Errorf("Unexpected error %q; expected %q", errorLine, test.expected) + } + buf.Reset() } - errorLine := buf.String() - if test.expected != errorLine { - t.Errorf("Unexpected error %q; expected %q", errorLine, test.expected) - } - buf.Reset() } } diff --git a/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s b/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s new file mode 100644 index 00000000000..1eee1a17db2 --- /dev/null +++ b/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s @@ -0,0 +1,68 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test to make sure that if we use R15 after it is clobbered by +// a global variable access while dynamic linking, we get an error. +// See issue 43661. + +TEXT ·a1(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVL $0, R15 + RET +TEXT ·a2(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVQ $0, R15 + RET +TEXT ·a3(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + XORL R15, R15 + RET +TEXT ·a4(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + XORQ R15, R15 + RET +TEXT ·a5(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + XORL R15, R15 + RET +TEXT ·a6(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + POPQ R15 + PUSHQ R15 + RET +TEXT ·a7(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVQ R15, AX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a8(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + ADDQ AX, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a9(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + ORQ R15, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a10(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + JEQ one + ORQ R15, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" +one: + RET +TEXT ·a11(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + JEQ one + JMP two +one: + ORQ R15, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" +two: + RET +TEXT ·a12(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + JMP one +two: + ORQ R15, R15 + RET +one: + MOVL $0, R15 + JMP two diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 91e3a0ca0a4..1146c1a7898 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -64,6 +64,16 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 CMN R1.SXTX<<2, R10 // 5fe921ab CMPW R2.UXTH<<3, R11 // 7f2d226b CMNW R1.SXTB, R9 // 3f81212b + ADD R1<<1, RSP, R3 // e367218b + ADDW R1<<2, R3, RSP // 7f48210b + SUB R1<<3, RSP // ff6f21cb + SUBS R1<<4, RSP, R3 // e37321eb + ADDS R1<<1, RSP, R4 // e46721ab + CMP R1<<2, RSP // 
ff6b21eb + CMN R1<<3, RSP // ff6f21ab + ADDS R1<<1, ZR, R4 // e40701ab + ADD R3<<50, ZR, ZR // ffcb038b + CMP R4<<24, ZR // ff6304eb CMPW $0x60060, R2 // CMPW $393312, R2 // 1b0c8052db00a0725f001b6b CMPW $40960, R0 // 1f284071 CMPW $27745, R2 // 3b8c8d525f001b6b @@ -207,6 +217,18 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 VUADDW2 V9.B16, V12.H8, V14.H8 // 8e11296e VUADDW2 V13.H8, V20.S4, V30.S4 // 9e126d6e VUADDW2 V21.S4, V24.D2, V29.D2 // 1d13b56e + VUMAX V3.B8, V2.B8, V1.B8 // 4164232e + VUMAX V3.B16, V2.B16, V1.B16 // 4164236e + VUMAX V3.H4, V2.H4, V1.H4 // 4164632e + VUMAX V3.H8, V2.H8, V1.H8 // 4164636e + VUMAX V3.S2, V2.S2, V1.S2 // 4164a32e + VUMAX V3.S4, V2.S4, V1.S4 // 4164a36e + VUMIN V3.B8, V2.B8, V1.B8 // 416c232e + VUMIN V3.B16, V2.B16, V1.B16 // 416c236e + VUMIN V3.H4, V2.H4, V1.H4 // 416c632e + VUMIN V3.H8, V2.H8, V1.H8 // 416c636e + VUMIN V3.S2, V2.S2, V1.S2 // 416ca32e + VUMIN V3.S4, V2.S4, V1.S4 // 416ca36e FCCMPS LT, F1, F2, $1 // 41b4211e FMADDS F1, F3, F2, F4 // 440c011f FMADDD F4, F5, F4, F4 // 8414441f @@ -352,6 +374,9 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 MOVD $1, ZR MOVD $1, R1 MOVK $1, R1 + MOVD $0x1000100010001000, RSP // MOVD $1152939097061330944, RSP // ff8304b2 + MOVW $0x10001000, RSP // MOVW $268439552, RSP // ff830432 + ADDW $0x10001000, R1 // ADDW $268439552, R1 // fb83043221001b0b // move a large constant to a Vd. VMOVS $0x80402010, V11 // VMOVS $2151686160, V11 @@ -380,13 +405,13 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 // LD1/ST1 VLD1 (R8), [V1.B16, V2.B16] // 01a1404c VLD1.P (R3), [V31.H8, V0.H8] // 7fa4df4c - VLD1.P (R8)(R20), [V21.B16, V22.B16] // VLD1.P (R8)(R20*1), [V21.B16,V22.B16] // 15a1d44c + VLD1.P (R8)(R20), [V21.B16, V22.B16] // 15a1d44c VLD1.P 64(R1), [V5.B16, V6.B16, V7.B16, V8.B16] // 2520df4c VLD1.P 1(R0), V4.B[15] // 041cdf4d VLD1.P 2(R0), V4.H[7] // 0458df4d VLD1.P 4(R0), V4.S[3] // 0490df4d VLD1.P 8(R0), V4.D[1] // 0484df4d - VLD1.P (R0)(R1), V4.D[1] // VLD1.P (R0)(R1*1), V4.D[1] // 0484c14d + VLD1.P (R0)(R1), V4.D[1] // 0484c14d VLD1 (R0), V4.D[1] // 0484404d VST1.P [V4.S4, V5.S4], 32(R1) // 24a89f4c VST1 [V0.S4, V1.S4], (R0) // 00a8004c @@ -394,29 +419,29 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 VLD1.P 24(R30), [V3.S2,V4.S2,V5.S2] // c36bdf0c VLD2 (R29), [V23.H8, V24.H8] // b787404c VLD2.P 16(R0), [V18.B8, V19.B8] // 1280df0c - VLD2.P (R1)(R2), [V15.S2, V16.S2] // VLD2.P (R1)(R2*1), [V15.S2,V16.S2] // 2f88c20c + VLD2.P (R1)(R2), [V15.S2, V16.S2] // 2f88c20c VLD3 (R27), [V11.S4, V12.S4, V13.S4] // 6b4b404c VLD3.P 48(RSP), [V11.S4, V12.S4, V13.S4] // eb4bdf4c - VLD3.P (R30)(R2), [V14.D2, V15.D2, V16.D2] // VLD3.P (R30)(R2*1), [V14.D2,V15.D2,V16.D2] // ce4fc24c + VLD3.P (R30)(R2), [V14.D2, V15.D2, V16.D2] // ce4fc24c VLD4 (R15), [V10.H4, V11.H4, V12.H4, V13.H4] // ea05400c VLD4.P 32(R24), [V31.B8, V0.B8, V1.B8, V2.B8] // 1f03df0c - VLD4.P (R13)(R9), [V14.S2, V15.S2, V16.S2, V17.S2] // VLD4.P (R13)(R9*1), [V14.S2,V15.S2,V16.S2,V17.S2] // ae09c90c + VLD4.P (R13)(R9), [V14.S2, V15.S2, V16.S2, V17.S2] // ae09c90c VLD1R (R1), [V9.B8] // 29c0400d VLD1R.P (R1), [V9.B8] // 29c0df0d VLD1R.P 1(R1), [V2.B8] // 22c0df0d VLD1R.P 2(R1), [V2.H4] // 22c4df0d VLD1R (R0), [V0.B16] // 00c0404d VLD1R.P (R0), [V0.B16] // 00c0df4d - VLD1R.P (R15)(R1), [V15.H4] // VLD1R.P (R15)(R1*1), [V15.H4] // efc5c10d + VLD1R.P (R15)(R1), [V15.H4] // efc5c10d VLD2R (R15), [V15.H4, V16.H4] // efc5600d VLD2R.P 16(R0), [V0.D2, V1.D2] // 00ccff4d - VLD2R.P (R0)(R5), [V31.D1, V0.D1] // VLD2R.P (R0)(R5*1), [V31.D1, V0.D1] // 1fcce50d + VLD2R.P (R0)(R5), [V31.D1, V0.D1] // 1fcce50d VLD3R (RSP), 
[V31.S2, V0.S2, V1.S2] // ffeb400d VLD3R.P 6(R15), [V15.H4, V16.H4, V17.H4] // efe5df0d - VLD3R.P (R15)(R6), [V15.H8, V16.H8, V17.H8] // VLD3R.P (R15)(R6*1), [V15.H8, V16.H8, V17.H8] // efe5c64d + VLD3R.P (R15)(R6), [V15.H8, V16.H8, V17.H8] // efe5c64d VLD4R (R0), [V0.B8, V1.B8, V2.B8, V3.B8] // 00e0600d VLD4R.P 16(RSP), [V31.S4, V0.S4, V1.S4, V2.S4] // ffebff4d - VLD4R.P (R15)(R9), [V15.H4, V16.H4, V17.H4, V18.H4] // VLD4R.P (R15)(R9*1), [V15.H4, V16.H4, V17.H4, V18.H4] // efe5e90d + VLD4R.P (R15)(R9), [V15.H4, V16.H4, V17.H4, V18.H4] // efe5e90d VST1.P [V24.S2], 8(R2) // 58789f0c VST1 [V29.S2, V30.S2], (R29) // bdab000c VST1 [V14.H4, V15.H4, V16.H4], (R27) // 6e67000c @@ -424,17 +449,17 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 VST1.P V4.H[7], 2(R0) // 04589f4d VST1.P V4.S[3], 4(R0) // 04909f4d VST1.P V4.D[1], 8(R0) // 04849f4d - VST1.P V4.D[1], (R0)(R1) // VST1.P V4.D[1], (R0)(R1*1) // 0484814d + VST1.P V4.D[1], (R0)(R1) // 0484814d VST1 V4.D[1], (R0) // 0484004d VST2 [V22.H8, V23.H8], (R23) // f686004c VST2.P [V14.H4, V15.H4], 16(R17) // 2e869f0c - VST2.P [V14.H4, V15.H4], (R3)(R17) // VST2.P [V14.H4,V15.H4], (R3)(R17*1) // 6e84910c + VST2.P [V14.H4, V15.H4], (R3)(R17) // 6e84910c VST3 [V1.D2, V2.D2, V3.D2], (R11) // 614d004c VST3.P [V18.S4, V19.S4, V20.S4], 48(R25) // 324b9f4c - VST3.P [V19.B8, V20.B8, V21.B8], (R3)(R7) // VST3.P [V19.B8, V20.B8, V21.B8], (R3)(R7*1) // 7340870c + VST3.P [V19.B8, V20.B8, V21.B8], (R3)(R7) // 7340870c VST4 [V22.D2, V23.D2, V24.D2, V25.D2], (R3) // 760c004c VST4.P [V14.D2, V15.D2, V16.D2, V17.D2], 64(R15) // ee0d9f4c - VST4.P [V24.B8, V25.B8, V26.B8, V27.B8], (R3)(R23) // VST4.P [V24.B8, V25.B8, V26.B8, V27.B8], (R3)(R23*1) // 7800970c + VST4.P [V24.B8, V25.B8, V26.B8, V27.B8], (R3)(R23) // 7800970c // pre/post-indexed FMOVS.P F20, 4(R0) // 144400bc @@ -521,29 +546,29 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 // shifted or extended register offset. 
MOVD (R2)(R6.SXTW), R4 // 44c866f8 - MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8 - MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8 + MOVD (R3)(R6), R5 // 656866f8 + MOVD (R2)(R6), R4 // 446866f8 MOVWU (R19)(R20<<2), R20 // 747a74b8 MOVD (R2)(R6<<3), R4 // 447866f8 MOVD (R3)(R7.SXTX<<3), R8 // 68f867f8 MOVWU (R5)(R4.UXTW), R10 // aa4864b8 MOVBU (R3)(R9.UXTW), R8 // 68486938 - MOVBU (R5)(R8), R10 // MOVBU (R5)(R8*1), R10 // aa686838 + MOVBU (R5)(R8), R10 // aa686838 MOVHU (R2)(R7.SXTW<<1), R11 // 4bd86778 MOVHU (R1)(R2<<1), R5 // 25786278 MOVB (R9)(R3.UXTW), R6 // 2649a338 - MOVB (R10)(R6), R15 // MOVB (R10)(R6*1), R15 // 4f69a638 + MOVB (R10)(R6), R15 // 4f69a638 MOVB (R29)(R30<<0), R14 // ae7bbe38 - MOVB (R29)(R30), R14 // MOVB (R29)(R30*1), R14 // ae6bbe38 + MOVB (R29)(R30), R14 // ae6bbe38 MOVH (R5)(R7.SXTX<<1), R19 // b3f8a778 MOVH (R8)(R4<<1), R10 // 0a79a478 MOVW (R9)(R8.SXTW<<2), R19 // 33d9a8b8 MOVW (R1)(R4.SXTX), R11 // 2be8a4b8 MOVW (R1)(R4.SXTX), ZR // 3fe8a4b8 - MOVW (R2)(R5), R12 // MOVW (R2)(R5*1), R12 // 4c68a5b8 - FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc + MOVW (R2)(R5), R12 // 4c68a5b8 + FMOVS (R2)(R6), F4 // 446866bc FMOVS (R2)(R6<<2), F4 // 447866bc - FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc + FMOVD (R2)(R6), F4 // 446866fc FMOVD (R2)(R6<<3), F4 // 447866fc MOVD R5, (R2)(R6<<3) // 457826f8 @@ -553,15 +578,15 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 MOVW R7, (R3)(R4.SXTW) // 67c824b8 MOVB R4, (R2)(R6.SXTX) // 44e82638 MOVB R8, (R3)(R9.UXTW) // 68482938 - MOVB R10, (R5)(R8) // MOVB R10, (R5)(R8*1) // aa682838 + MOVB R10, (R5)(R8) // aa682838 MOVH R11, (R2)(R7.SXTW<<1) // 4bd82778 MOVH R5, (R1)(R2<<1) // 25782278 MOVH R7, (R2)(R5.SXTX<<1) // 47f82578 MOVH R8, (R3)(R6.UXTW) // 68482678 MOVB R4, (R2)(R6.SXTX) // 44e82638 - FMOVS F4, (R2)(R6) // FMOVS F4, (R2)(R6*1) // 446826bc + FMOVS F4, (R2)(R6) // 446826bc FMOVS F4, (R2)(R6<<2) // 447826bc - FMOVD F4, (R2)(R6) // FMOVD F4, (R2)(R6*1) // 446826fc + FMOVD F4, (R2)(R6) // 446826fc FMOVD F4, (R2)(R6<<3) // 447826fc // vmov @@ -571,9 +596,12 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 VMOV R20, V1.S[0] // 811e044e VMOV R20, V1.S[1] // 811e0c4e VMOV R1, V9.H4 // 290c020e + VDUP R1, V9.H4 // 290c020e VMOV R22, V11.D2 // cb0e084e + VDUP R22, V11.D2 // cb0e084e VMOV V2.B16, V4.B16 // 441ca24e VMOV V20.S[0], V20 // 9406045e + VDUP V20.S[0], V20 // 9406045e VMOV V12.D[0], V12.D[1] // 8c05186e VMOV V10.S[0], V12.S[1] // 4c050c6e VMOV V9.H[0], V12.H[1] // 2c05066e @@ -982,6 +1010,54 @@ again: FSTPS (F3, F4), x(SB) FSTPS (F3, F4), x+8(SB) +// FLDPQ/FSTPQ + FLDPQ -4000(R0), (F1, F2) // 1b803ed1610b40ad + FLDPQ -1024(R0), (F1, F2) // 010860ad + FLDPQ (R0), (F1, F2) // 010840ad + FLDPQ 16(R0), (F1, F2) // 018840ad + FLDPQ -16(R0), (F1, F2) // 01887fad + FLDPQ.W 32(R0), (F1, F2) // 0108c1ad + FLDPQ.P 32(R0), (F1, F2) // 0108c1ac + FLDPQ 11(R0), (F1, F2) // 1b2c0091610b40ad + FLDPQ 1024(R0), (F1, F2) // 1b001091610b40ad + FLDPQ 4104(R0), (F1, F2) + FLDPQ -4000(RSP), (F1, F2) // fb833ed1610b40ad + FLDPQ -1024(RSP), (F1, F2) // e10b60ad + FLDPQ (RSP), (F1, F2) // e10b40ad + FLDPQ 16(RSP), (F1, F2) // e18b40ad + FLDPQ -16(RSP), (F1, F2) // e18b7fad + FLDPQ.W 32(RSP), (F1, F2) // e10bc1ad + FLDPQ.P 32(RSP), (F1, F2) // e10bc1ac + FLDPQ 11(RSP), (F1, F2) // fb2f0091610b40ad + FLDPQ 1024(RSP), (F1, F2) // fb031091610b40ad + FLDPQ 4104(RSP), (F1, F2) + FLDPQ -31(R0), (F1, F2) // 1b7c00d1610b40ad + FLDPQ -4(R0), (F1, F2) // 1b1000d1610b40ad + FLDPQ x(SB), (F1, F2) + FLDPQ x+8(SB), (F1, F2) + FSTPQ (F3, F4), -4000(R5) // 
bb803ed1631300ad + FSTPQ (F3, F4), -1024(R5) // a31020ad + FSTPQ (F3, F4), (R5) // a31000ad + FSTPQ (F3, F4), 16(R5) // a39000ad + FSTPQ (F3, F4), -16(R5) // a3903fad + FSTPQ.W (F3, F4), 32(R5) // a31081ad + FSTPQ.P (F3, F4), 32(R5) // a31081ac + FSTPQ (F3, F4), 11(R5) // bb2c0091631300ad + FSTPQ (F3, F4), 1024(R5) // bb001091631300ad + FSTPQ (F3, F4), 4104(R5) + FSTPQ (F3, F4), -4000(RSP) // fb833ed1631300ad + FSTPQ (F3, F4), -1024(RSP) // e31320ad + FSTPQ (F3, F4), (RSP) // e31300ad + FSTPQ (F3, F4), 16(RSP) // e39300ad + FSTPQ (F3, F4), -16(RSP) // e3933fad + FSTPQ.W (F3, F4), 32(RSP) // e31381ad + FSTPQ.P (F3, F4), 32(RSP) // e31381ac + FSTPQ (F3, F4), 11(RSP) // fb2f0091631300ad + FSTPQ (F3, F4), 1024(RSP) // fb031091631300ad + FSTPQ (F3, F4), 4104(RSP) + FSTPQ (F3, F4), x(SB) + FSTPQ (F3, F4), x+8(SB) + // System Register MSR $1, SPSel // bf4100d5 MSR $9, DAIFSet // df4903d5 diff --git a/src/cmd/asm/internal/asm/testdata/arm64enc.s b/src/cmd/asm/internal/asm/testdata/arm64enc.s index e802ee76f5b..a29862822d3 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64enc.s +++ b/src/cmd/asm/internal/asm/testdata/arm64enc.s @@ -188,7 +188,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 MOVBU 2916(R24), R3 // 03936d39 MOVBU (R19)(R14<<0), R23 // 777a6e38 MOVBU (R2)(R8.SXTX), R19 // 53e86838 - MOVBU (R27)(R23), R14 // MOVBU (R27)(R23*1), R14 // 6e6b7738 + MOVBU (R27)(R23), R14 // 6e6b7738 MOVHU.P 107(R14), R13 // cdb54678 MOVHU.W 192(R3), R2 // 620c4c78 MOVHU 6844(R4), R19 // 93787579 @@ -201,9 +201,9 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 MOVB 997(R9), R23 // 37958f39 //TODO MOVBW (R2<<1)(R21), R15 // af7ae238 //TODO MOVBW (R26)(R0), R21 // 1568fa38 - MOVB (R5)(R15), R16 // MOVB (R5)(R15*1), R16 // b068af38 + MOVB (R5)(R15), R16 // b068af38 MOVB (R19)(R26.SXTW), R19 // 73caba38 - MOVB (R29)(R30), R14 // MOVB (R29)(R30*1), R14 // ae6bbe38 + MOVB (R29)(R30), R14 // ae6bbe38 //TODO MOVHW.P 218(R22), R25 // d9a6cd78 MOVH.P 179(R23), R5 // e5368b78 //TODO MOVHW.W 136(R2), R27 // 5b8cc878 @@ -357,12 +357,12 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 MOVD R25, -137(R17) // 397217f8 MOVW R4, (R12)(R22.UXTW<<2) // 845936b8 MOVD R27, (R5)(R15.UXTW<<3) // bb582ff8 - MOVB R2, (R10)(R16) // MOVB R2, (R10)(R16*1) // 42693038 - MOVB R2, (R29)(R26) // MOVB R2, (R29)(R26*1) // a26b3a38 + MOVB R2, (R10)(R16) // 42693038 + MOVB R2, (R29)(R26) // a26b3a38 MOVH R11, -80(R23) // eb021b78 MOVH R11, (R27)(R14.SXTW<<1) // 6bdb2e78 - MOVB R19, (R0)(R4) // MOVB R19, (R0)(R4*1) // 13682438 - MOVB R1, (R6)(R4) // MOVB R1, (R6)(R4*1) // c1682438 + MOVB R19, (R0)(R4) // 13682438 + MOVB R1, (R6)(R4) // c1682438 MOVH R3, (R11)(R13<<1) // 63792d78 //TODO STTR 55(R4), R29 // 9d7803b8 //TODO STTR 124(R5), R25 // b9c807f8 @@ -669,6 +669,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 VCMEQ V24.S4, V13.S4, V12.S4 // ac8db86e VCNT V13.B8, V11.B8 // ab59200e VMOV V31.B[15], V18 // f2071f5e + VDUP V31.B[15], V18 // f2071f5e VDUP V31.B[13], V20.B16 // f4071b4e VEOR V4.B8, V18.B8, V7.B8 // 471e242e VEXT $4, V2.B8, V1.B8, V3.B8 // 2320022e @@ -679,27 +680,28 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 VLD1 (R24), [V18.D1, V19.D1, V20.D1] // 126f400c VLD1 (R29), [V14.D1, V15.D1, V16.D1, V17.D1] // ae2f400c VLD1.P 16(R23), [V1.B16] // e172df4c - VLD1.P (R6)(R11), [V31.D1] // VLD1.P (R6)(R11*1), [V31.D1] // df7ccb0c + VLD1.P (R6)(R11), [V31.D1] // df7ccb0c VLD1.P 16(R7), [V31.D1, V0.D1] // ffacdf0c - VLD1.P (R19)(R4), [V24.B8, V25.B8] // VLD1.P (R19)(R4*1), [V24.B8, V25.B8] // 78a2c40c - VLD1.P (R20)(R8), [V7.H8, V8.H8, V9.H8] // VLD1.P (R20)(R8*1), [V7.H8, V8.H8, 
V9.H8] // 8766c84c + VLD1.P (R19)(R4), [V24.B8, V25.B8] // 78a2c40c + VLD1.P (R20)(R8), [V7.H8, V8.H8, V9.H8] // 8766c84c VLD1.P 32(R30), [V5.B8, V6.B8, V7.B8, V8.B8] // c523df0c VLD1 (R19), V14.B[15] // 6e1e404d VLD1 (R29), V0.H[1] // a04b400d VLD1 (R27), V2.S[0] // 6283400d VLD1 (R21), V5.D[1] // a586404d VLD1.P 1(R19), V10.B[14] // 6a1adf4d - VLD1.P (R3)(R14), V16.B[11] // VLD1.P (R3)(R14*1), V16.B[11] // 700cce4d + VLD1.P (R3)(R14), V16.B[11] // 700cce4d VLD1.P 2(R1), V28.H[2] // 3c50df0d - VLD1.P (R13)(R20), V9.H[2] // VLD1.P (R13)(R20*1), V9.H[2] // a951d40d + VLD1.P (R13)(R20), V9.H[2] // a951d40d VLD1.P 4(R17), V1.S[3] // 2192df4d - VLD1.P (R14)(R2), V17.S[2] // VLD1.P (R14)(R2*1), V17.S[2] // d181c24d + VLD1.P (R14)(R2), V17.S[2] // d181c24d VLD1.P 8(R5), V30.D[1] // be84df4d - VLD1.P (R27)(R13), V27.D[0] // VLD1.P (R27)(R13*1), V27.D[0] // 7b87cd0d + VLD1.P (R27)(R13), V27.D[0] // 7b87cd0d //TODO FMOVS.P -29(RSP), F8 // e8375ebc //TODO FMOVS.W 71(R29), F28 // bc7f44bc FMOVS 6160(R4), F23 // 971058bd VMOV V18.B[10], V27 // 5b06155e + VDUP V18.B[10], V27 // 5b06155e VMOV V12.B[2], V28.B[12] // 9c15196e VMOV R30, V4.B[13] // c41f1b4e VMOV V2.B16, V4.B16 // 441ca24e @@ -732,25 +734,25 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 VSHL $7, V22.D2, V25.D2 // d956474f VST1 [V14.H4, V15.H4, V16.H4], (R27) // 6e67000c VST1 [V2.S4, V3.S4, V4.S4, V5.S4], (R14) // c229004c - VST1.P [V25.S4], (R7)(R29) // VST1.P [V25.S4], (R7)(R29*1) // f9789d4c + VST1.P [V25.S4], (R7)(R29) // f9789d4c VST1.P [V25.D2, V26.D2], 32(R7) // f9ac9f4c - VST1.P [V14.D1, V15.D1], (R7)(R23) // VST1.P [V14.D1, V15.D1], (R7)(R23*1) // eeac970c + VST1.P [V14.D1, V15.D1], (R7)(R23) // eeac970c VST1.P [V25.D2, V26.D2, V27.D2], 48(R27) // 796f9f4c - VST1.P [V13.H8, V14.H8, V15.H8], (R3)(R14) // VST1.P [V13.H8, V14.H8, V15.H8], (R3)(R14*1) // 6d648e4c + VST1.P [V13.H8, V14.H8, V15.H8], (R3)(R14) // 6d648e4c VST1.P [V16.S4, V17.S4, V18.S4, V19.S4], 64(R6) // d0289f4c - VST1.P [V19.H4, V20.H4, V21.H4, V22.H4], (R4)(R16) // VST1.P [V19.H4, V20.H4, V21.H4, V22.H4], (R4)(R16*1) // 9324900c + VST1.P [V19.H4, V20.H4, V21.H4, V22.H4], (R4)(R16) // 9324900c VST1 V12.B[3], (R1) // 2c0c000d VST1 V12.B[3], (R1) // 2c0c000d VST1 V25.S[2], (R20) // 9982004d VST1 V9.D[1], (RSP) // e987004d VST1.P V30.B[6], 1(R3) // 7e189f0d - VST1.P V8.B[0], (R3)(R21) // VST1.P V8.B[0], (R3)(R21*1) // 6800950d + VST1.P V8.B[0], (R3)(R21) // 6800950d VST1.P V15.H[5], 2(R10) // 4f499f4d - VST1.P V1.H[7], (R23)(R11) // VST1.P V1.H[7], (R23)(R11*1) // e15a8b4d + VST1.P V1.H[7], (R23)(R11) // e15a8b4d VST1.P V26.S[0], 4(R11) // 7a819f0d - VST1.P V9.S[1], (R16)(R21) // VST1.P V9.S[1], (R16)(R21*1) // 0992950d + VST1.P V9.S[1], (R16)(R21) // 0992950d VST1.P V16.D[0], 8(R9) // 30859f0d - VST1.P V23.D[1], (R21)(R16) // VST1.P V23.D[1], (R21)(R16*1) // b786904d + VST1.P V23.D[1], (R21)(R16) // b786904d VSUB V1, V12, V23 // 9785e17e VUADDLV V31.S4, V11 // eb3bb06e UCVTFWS R11, F19 // 7301231e diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s index e579f20836a..66fc9107594 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64error.s +++ b/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -8,10 +8,48 @@ TEXT errors(SB),$0 ADDSW R7->32, R14, R13 // ERROR "shift amount out of range 0 to 31" ADD R1.UXTB<<5, R2, R3 // ERROR "shift amount out of range 0 to 4" ADDS R1.UXTX<<7, R2, R3 // ERROR "shift amount out of range 0 to 4" + ADDS R5, R6, RSP // ERROR "illegal destination register" + SUBS R5, R6, RSP // ERROR "illegal 
destination register" + ADDSW R5, R6, RSP // ERROR "illegal destination register" + SUBSW R5, R6, RSP // ERROR "illegal destination register" + ADDS $0xff, R6, RSP // ERROR "illegal destination register" + ADDS $0xffff0, R6, RSP // ERROR "illegal destination register" + ADDS $0x1000100010001000, R6, RSP // ERROR "illegal destination register" + ADDS $0x10001000100011, R6, RSP // ERROR "illegal destination register" + ADDSW $0xff, R6, RSP // ERROR "illegal destination register" + ADDSW $0xffff0, R6, RSP // ERROR "illegal destination register" + ADDSW $0x1000100010001000, R6, RSP // ERROR "illegal destination register" + ADDSW $0x10001000100011, R6, RSP // ERROR "illegal destination register" + SUBS $0xff, R6, RSP // ERROR "illegal destination register" + SUBS $0xffff0, R6, RSP // ERROR "illegal destination register" + SUBS $0x1000100010001000, R6, RSP // ERROR "illegal destination register" + SUBS $0x10001000100011, R6, RSP // ERROR "illegal destination register" + SUBSW $0xff, R6, RSP // ERROR "illegal destination register" + SUBSW $0xffff0, R6, RSP // ERROR "illegal destination register" + SUBSW $0x1000100010001000, R6, RSP // ERROR "illegal destination register" + SUBSW $0x10001000100011, R6, RSP // ERROR "illegal destination register" AND $0x22220000, R2, RSP // ERROR "illegal combination" ANDS $0x22220000, R2, RSP // ERROR "illegal combination" ADD R1, R2, R3, R4 // ERROR "illegal combination" BICW R7@>33, R5, R16 // ERROR "shift amount out of range 0 to 31" + NEGW R7<<33, R5 // ERROR "shift amount out of range 0 to 31" + NEGSW R7<<33, R5 // ERROR "shift amount out of range 0 to 31" + ADD R7@>2, R5, R16 // ERROR "unsupported shift operator" + ADDW R7@>2, R5, R16 // ERROR "unsupported shift operator" + ADDS R7@>2, R5, R16 // ERROR "unsupported shift operator" + ADDSW R7@>2, R5, R16 // ERROR "unsupported shift operator" + SUB R7@>2, R5, R16 // ERROR "unsupported shift operator" + SUBW R7@>2, R5, R16 // ERROR "unsupported shift operator" + SUBS R7@>2, R5, R16 // ERROR "unsupported shift operator" + SUBSW R7@>2, R5, R16 // ERROR "unsupported shift operator" + CMP R7@>2, R5 // ERROR "unsupported shift operator" + CMPW R7@>2, R5 // ERROR "unsupported shift operator" + CMN R7@>2, R5 // ERROR "unsupported shift operator" + CMNW R7@>2, R5 // ERROR "unsupported shift operator" + NEG R7@>2, R5 // ERROR "unsupported shift operator" + NEGW R7@>2, R5 // ERROR "unsupported shift operator" + NEGS R7@>2, R5 // ERROR "unsupported shift operator" + NEGSW R7@>2, R5 // ERROR "unsupported shift operator" CINC CS, R2, R3, R4 // ERROR "illegal combination" CSEL LT, R1, R2 // ERROR "illegal combination" LDP.P 8(R2), (R2, R3) // ERROR "constrained unpredictable behavior" @@ -21,8 +59,8 @@ TEXT errors(SB),$0 LDP (R0), (R3, ZR) // ERROR "invalid register pair" LDXPW (RSP), (R2, R2) // ERROR "constrained unpredictable behavior" LDAXPW (R5), (R2, R2) // ERROR "constrained unpredictable behavior" - MOVD.P 300(R2), R3 // ERROR "offset out of range [-255,254]" - MOVD.P R3, 344(R2) // ERROR "offset out of range [-255,254]" + MOVD.P 300(R2), R3 // ERROR "offset out of range [-256,255]" + MOVD.P R3, 344(R2) // ERROR "offset out of range [-256,255]" MOVD (R3)(R7.SXTX<<2), R8 // ERROR "invalid index shift amount" MOVWU (R5)(R4.UXTW<<3), R10 // ERROR "invalid index shift amount" MOVWU (R5)(R4<<1), R10 // ERROR "invalid index shift amount" @@ -58,13 +96,13 @@ TEXT errors(SB),$0 VMOV V8.H[9], R3 // ERROR "register element index out of range 0 to 7" VMOV V8.S[4], R3 // ERROR "register element index out of range 0 to 3" VMOV 
V8.D[2], R3 // ERROR "register element index out of range 0 to 1" - VDUP V8.B[16], R3.B16 // ERROR "register element index out of range 0 to 15" - VDUP V8.B[17], R3.B8 // ERROR "register element index out of range 0 to 15" - VDUP V8.H[9], R3.H4 // ERROR "register element index out of range 0 to 7" - VDUP V8.H[9], R3.H8 // ERROR "register element index out of range 0 to 7" - VDUP V8.S[4], R3.S2 // ERROR "register element index out of range 0 to 3" - VDUP V8.S[4], R3.S4 // ERROR "register element index out of range 0 to 3" - VDUP V8.D[2], R3.D2 // ERROR "register element index out of range 0 to 1" + VDUP V8.B[16], V3.B16 // ERROR "register element index out of range 0 to 15" + VDUP V8.B[17], V3.B8 // ERROR "register element index out of range 0 to 15" + VDUP V8.H[9], V3.H4 // ERROR "register element index out of range 0 to 7" + VDUP V8.H[9], V3.H8 // ERROR "register element index out of range 0 to 7" + VDUP V8.S[4], V3.S2 // ERROR "register element index out of range 0 to 3" + VDUP V8.S[4], V3.S4 // ERROR "register element index out of range 0 to 3" + VDUP V8.D[2], V3.D2 // ERROR "register element index out of range 0 to 1" VFMLA V1.D2, V12.D2, V3.S2 // ERROR "operand mismatch" VFMLA V1.S2, V12.S2, V3.D2 // ERROR "operand mismatch" VFMLA V1.S4, V12.S2, V3.D2 // ERROR "operand mismatch" @@ -109,6 +147,9 @@ TEXT errors(SB),$0 VREV16 V1.D1, V2.D1 // ERROR "invalid arrangement" VREV16 V1.B8, V2.B16 // ERROR "invalid arrangement" VREV16 V1.H4, V2.H4 // ERROR "invalid arrangement" + FLDPQ (R0), (R1, R2) // ERROR "invalid register pair" + FLDPQ (R1), (F2, F2) // ERROR "constrained unpredictable behavior" + FSTPQ (R1, R2), (R0) // ERROR "invalid register pair" FLDPD (R0), (R1, R2) // ERROR "invalid register pair" FLDPD (R1), (F2, F2) // ERROR "constrained unpredictable behavior" FLDPS (R2), (F3, F3) // ERROR "constrained unpredictable behavior" @@ -355,10 +396,17 @@ TEXT errors(SB),$0 VBIF V0.D2, V1.D2, V2.D2 // ERROR "invalid arrangement" VUADDW V9.B8, V12.H8, V14.B8 // ERROR "invalid arrangement" VUADDW2 V9.B8, V12.S4, V14.S4 // ERROR "operand mismatch" + VUMAX V1.D2, V2.D2, V3.D2 // ERROR "invalid arrangement" + VUMIN V1.D2, V2.D2, V3.D2 // ERROR "invalid arrangement" + VUMAX V1.B8, V2.B8, V3.B16 // ERROR "operand mismatch" + VUMIN V1.H4, V2.S4, V3.H4 // ERROR "operand mismatch" VSLI $64, V7.D2, V8.D2 // ERROR "shift out of range" VUSRA $0, V7.D2, V8.D2 // ERROR "shift out of range" CASPD (R3, R4), (R2), (R8, R9) // ERROR "source register pair must start from even register" CASPD (R2, R3), (R2), (R9, R10) // ERROR "destination register pair must start from even register" CASPD (R2, R4), (R2), (R8, R9) // ERROR "source register pair must be contiguous" CASPD (R2, R3), (R2), (R8, R10) // ERROR "destination register pair must be contiguous" + ADD R1>>2, RSP, R3 // ERROR "illegal combination" + ADDS R2<<3, R3, RSP // ERROR "unexpected SP reference" + CMP R1<<5, RSP // ERROR "the left shift amount out of range 0 to 4" RET diff --git a/src/cmd/asm/internal/asm/testdata/mips64.s b/src/cmd/asm/internal/asm/testdata/mips64.s index 21ab82f319c..99044d89f7b 100644 --- a/src/cmd/asm/internal/asm/testdata/mips64.s +++ b/src/cmd/asm/internal/asm/testdata/mips64.s @@ -407,6 +407,8 @@ label4: SRLV R27, R6, R17 // 03668816 SRA R11, R19, R20 // 0173a007 SRAV R20, R19, R19 // 02939817 + ROTR R19, R18, R20 // 0272a046 + ROTRV R9, R13, R16 // 012d8056 // LSHW rreg ',' rreg // { @@ -418,6 +420,8 @@ label4: SRLV R27, R6 // 03663016 SRA R11, R19 // 01739807 SRAV R20, R19 // 02939817 + ROTR R20, R19 // 02939846 + ROTRV 
R16, R9 // 02094856 // LSHW imm ',' sreg ',' rreg // { @@ -429,6 +433,8 @@ label4: SRLV $31, R6, R17 // 00068ffa SRA $8, R8, R19 // 00089a03 SRAV $19, R8, R7 // 00083cfb + ROTR $12, R8, R3 // 00281b02 + ROTRV $8, R22, R22 // 0036b23a // LSHW imm ',' rreg // { @@ -440,6 +446,8 @@ label4: SRLV $31, R17 // 00118ffa SRA $3, R12 // 000c60c3 SRAV $12, R3 // 00031b3b + ROTR $12, R8 // 00284302 + ROTRV $63, R22 // 0036b7fe // LAND/LXOR/LNOR/LOR rreg ',' rreg diff --git a/src/cmd/asm/internal/asm/testdata/ppc64.s b/src/cmd/asm/internal/asm/testdata/ppc64.s index 8f6eb14f73c..b6c0aa5035c 100644 --- a/src/cmd/asm/internal/asm/testdata/ppc64.s +++ b/src/cmd/asm/internal/asm/testdata/ppc64.s @@ -41,6 +41,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 MOVDBR (R3)(R4), R5 // 7ca41c28 MOVWBR (R3)(R4), R5 // 7ca41c2c MOVHBR (R3)(R4), R5 // 7ca41e2c + MOVD $foo+4009806848(FP), R5 // 3ca1ef0138a5cc20 + MOVD $foo(SB), R5 // 3ca0000038a50000 MOVDU 8(R3), R4 // e8830009 MOVDU (R3)(R4), R5 // 7ca4186a @@ -77,6 +79,15 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 MOVBU R4, 1(R3) // 9c830001 MOVBU R5, (R3)(R4) // 7ca419ee + MOVB $0, R4 // 38800000 + MOVBZ $0, R4 // 38800000 + MOVH $0, R4 // 38800000 + MOVHZ $0, R4 // 38800000 + MOVW $0, R4 // 38800000 + MOVWZ $0, R4 // 38800000 + MOVD $0, R4 // 38800000 + MOVD $0, R0 // 38000000 + ADD $1, R3 // 38630001 ADD $1, R3, R4 // 38830001 ADD $-1, R4 // 3884ffff @@ -280,11 +291,17 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 ROTLW R3, R4, R5 // 5c85183e EXTSWSLI $3, R4, R5 // 7c851ef4 RLWMI $7, R3, $65535, R6 // 50663c3e + RLWMI $7, R3, $16, $31, R6 // 50663c3e RLWMICC $7, R3, $65535, R6 // 50663c3f + RLWMICC $7, R3, $16, $31, R6 // 50663c3f RLWNM $3, R4, $7, R6 // 54861f7e + RLWNM $3, R4, $29, $31, R6 // 54861f7e RLWNM R3, R4, $7, R6 // 5c861f7e + RLWNM R3, R4, $29, $31, R6 // 5c861f7e RLWNMCC $3, R4, $7, R6 // 54861f7f + RLWNMCC $3, R4, $29, $31, R6 // 54861f7f RLWNMCC R3, R4, $7, R6 // 5c861f7f + RLWNMCC R3, R4, $29, $31, R6 // 5c861f7f RLDMI $0, R4, $7, R6 // 7886076c RLDMICC $0, R4, $7, R6 // 7886076d RLDIMI $0, R4, $7, R6 // 788601cc @@ -303,6 +320,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 RLDICCC $0, R4, $15, R6 // 788603c9 CLRLSLWI $16, R5, $8, R4 // 54a4422e CLRLSLDI $24, R4, $2, R3 // 78831588 + RLDCR $1, R1, $-16, R1 // 78210ee4 + RLDCRCC $1, R1, $-16, R1 // 78210ee5 BEQ 0(PC) // 41820000 BEQ CR1,0(PC) // 41860000 diff --git a/src/cmd/asm/internal/asm/testdata/riscvenc.s b/src/cmd/asm/internal/asm/testdata/riscv64.s similarity index 94% rename from src/cmd/asm/internal/asm/testdata/riscvenc.s rename to src/cmd/asm/internal/asm/testdata/riscv64.s index 9a49d96ca0a..77c0764c481 100644 --- a/src/cmd/asm/internal/asm/testdata/riscvenc.s +++ b/src/cmd/asm/internal/asm/testdata/riscv64.s @@ -280,6 +280,9 @@ start: MOV $2047, X5 // 9b02f07f MOV $-2048, X5 // 9b020080 + // Converted to load of symbol. + MOV $4294967296, X5 // 97020000 + MOV (X5), X6 // 03b30200 MOV 4(X5), X6 // 03b34200 MOVB (X5), X6 // 03830200 @@ -325,7 +328,7 @@ start: // These jumps can get printed as jumps to 2 because they go to the // second instruction in the function (the first instruction is an // invisible stack pointer adjustment). 
- JMP start // JMP 2 // 6ff09fc2 + JMP start // JMP 2 // 6ff01fc2 JMP (X5) // 67800200 JMP 4(X5) // 67804200 @@ -338,16 +341,16 @@ start: JMP asmtest(SB) // 970f0000 // Branch pseudo-instructions - BEQZ X5, start // BEQZ X5, 2 // e38602c0 - BGEZ X5, start // BGEZ X5, 2 // e3d402c0 - BGT X5, X6, start // BGT X5, X6, 2 // e34253c0 - BGTU X5, X6, start // BGTU X5, X6, 2 // e36053c0 - BGTZ X5, start // BGTZ X5, 2 // e34e50be - BLE X5, X6, start // BLE X5, X6, 2 // e35c53be - BLEU X5, X6, start // BLEU X5, X6, 2 // e37a53be - BLEZ X5, start // BLEZ X5, 2 // e35850be - BLTZ X5, start // BLTZ X5, 2 // e3c602be - BNEZ X5, start // BNEZ X5, 2 // e39402be + BEQZ X5, start // BEQZ X5, 2 // e38202c0 + BGEZ X5, start // BGEZ X5, 2 // e3d002c0 + BGT X5, X6, start // BGT X5, X6, 2 // e34e53be + BGTU X5, X6, start // BGTU X5, X6, 2 // e36c53be + BGTZ X5, start // BGTZ X5, 2 // e34a50be + BLE X5, X6, start // BLE X5, X6, 2 // e35853be + BLEU X5, X6, start // BLEU X5, X6, 2 // e37653be + BLEZ X5, start // BLEZ X5, 2 // e35450be + BLTZ X5, start // BLTZ X5, 2 // e3c202be + BNEZ X5, start // BNEZ X5, 2 // e39002be // Set pseudo-instructions SEQZ X15, X15 // 93b71700 diff --git a/src/cmd/asm/internal/asm/testdata/riscv64error.s b/src/cmd/asm/internal/asm/testdata/riscv64error.s new file mode 100644 index 00000000000..fb43e68fc17 --- /dev/null +++ b/src/cmd/asm/internal/asm/testdata/riscv64error.s @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +TEXT errors(SB),$0 + MOV $0, 0(SP) // ERROR "constant load must target register" + MOV $0, 8(SP) // ERROR "constant load must target register" + MOV $1234, 0(SP) // ERROR "constant load must target register" + MOV $1234, 8(SP) // ERROR "constant load must target register" + MOVB $1, X5 // ERROR "unsupported constant load" + MOVH $1, X5 // ERROR "unsupported constant load" + MOVW $1, X5 // ERROR "unsupported constant load" + MOVF $1, X5 // ERROR "unsupported constant load" + RET diff --git a/src/cmd/asm/internal/lex/input.go b/src/cmd/asm/internal/lex/input.go index da4ebe6d6e3..e373ae817e0 100644 --- a/src/cmd/asm/internal/lex/input.go +++ b/src/cmd/asm/internal/lex/input.go @@ -6,6 +6,7 @@ package lex import ( "fmt" + "internal/buildcfg" "os" "path/filepath" "strconv" @@ -45,6 +46,21 @@ func NewInput(name string) *Input { // predefine installs the macros set by the -D flag on the command line. func predefine(defines flags.MultiFlag) map[string]*Macro { macros := make(map[string]*Macro) + + // Set macros for GOEXPERIMENTs so we can easily switch + // runtime assembly code based on them. + if *flags.CompilingRuntime { + for _, exp := range buildcfg.EnabledExperiments() { + // Define macro. 
+ name := "GOEXPERIMENT_" + exp + macros[name] = &Macro{ + name: name, + args: nil, + tokens: Tokenize("1"), + } + } + } + for _, name := range defines { value := "1" i := strings.IndexRune(name, '=') diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go index 98618a67ef5..043bc696e58 100644 --- a/src/cmd/asm/main.go +++ b/src/cmd/asm/main.go @@ -8,6 +8,7 @@ import ( "bufio" "flag" "fmt" + "internal/buildcfg" "log" "os" @@ -25,7 +26,8 @@ func main() { log.SetFlags(0) log.SetPrefix("asm: ") - GOARCH := objabi.GOARCH + buildcfg.Check() + GOARCH := buildcfg.GOARCH architecture := arch.Set(GOARCH) if architecture == nil { @@ -68,7 +70,7 @@ func main() { defer buf.Close() if !*flags.SymABIs { - fmt.Fprintf(buf, "go object %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version) + buf.WriteString(objabi.HeaderString()) fmt.Fprintf(buf, "!\n") } diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go index e782c866ac7..a6787f64050 100644 --- a/src/cmd/cgo/doc.go +++ b/src/cmd/cgo/doc.go @@ -387,6 +387,9 @@ and of course there is nothing stopping the C code from doing anything it likes. However, programs that break these rules are likely to fail in unexpected and unpredictable ways. +The runtime/cgo.Handle type can be used to safely pass Go values +between Go and C. See the runtime/cgo package documentation for details. + Note: the current implementation has a bug. While Go code is permitted to write nil or a C pointer (but not a Go pointer) to C memory, the current implementation may sometimes cause a runtime error if the diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index b5e28e32545..ae61725bc75 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -909,7 +909,7 @@ func (p *Package) rewriteCall(f *File, call *Call) (string, bool) { var sbCheck bytes.Buffer for i, param := range params { origArg := args[i] - arg, nu := p.mangle(f, &args[i]) + arg, nu := p.mangle(f, &args[i], true) if nu { needsUnsafe = true } @@ -952,7 +952,7 @@ func (p *Package) rewriteCall(f *File, call *Call) (string, bool) { sb.WriteString("return ") } - m, nu := p.mangle(f, &call.Call.Fun) + m, nu := p.mangle(f, &call.Call.Fun, false) if nu { needsUnsafe = true } @@ -1086,7 +1086,8 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool { // rewriting calls when it finds them. // It removes the corresponding references in f.Ref and f.Calls, so that we // don't try to do the replacement again in rewriteRef or rewriteCall. -func (p *Package) mangle(f *File, arg *ast.Expr) (ast.Expr, bool) { +// If addPosition is true, add position info to the idents of C names in arg. +func (p *Package) mangle(f *File, arg *ast.Expr, addPosition bool) (ast.Expr, bool) { needsUnsafe := false f.walk(arg, ctxExpr, func(f *File, arg interface{}, context astContext) { px, ok := arg.(*ast.Expr) @@ -1101,7 +1102,7 @@ func (p *Package) mangle(f *File, arg *ast.Expr) (ast.Expr, bool) { for _, r := range f.Ref { if r.Expr == px { - *px = p.rewriteName(f, r) + *px = p.rewriteName(f, r, addPosition) r.Done = true break } @@ -1361,7 +1362,7 @@ func (p *Package) rewriteRef(f *File) { } } - expr := p.rewriteName(f, r) + expr := p.rewriteName(f, r, false) if *godefs { // Substitute definition for mangled type name. @@ -1424,8 +1425,23 @@ func (p *Package) rewriteRef(f *File) { } // rewriteName returns the expression used to rewrite a reference. -func (p *Package) rewriteName(f *File, r *Ref) ast.Expr { - var expr ast.Expr = ast.NewIdent(r.Name.Mangle) // default +// If addPosition is true, add position info in the ident name. 
+func (p *Package) rewriteName(f *File, r *Ref, addPosition bool) ast.Expr { + getNewIdent := ast.NewIdent + if addPosition { + getNewIdent = func(newName string) *ast.Ident { + mangledIdent := ast.NewIdent(newName) + if len(newName) == len(r.Name.Go) { + return mangledIdent + } + p := fset.Position((*r.Expr).End()) + if p.Column == 0 { + return mangledIdent + } + return ast.NewIdent(fmt.Sprintf("%s /*line :%d:%d*/", newName, p.Line, p.Column)) + } + } + var expr ast.Expr = getNewIdent(r.Name.Mangle) // default switch r.Context { case ctxCall, ctxCall2: if r.Name.Kind != "func" { @@ -1453,7 +1469,7 @@ func (p *Package) rewriteName(f *File, r *Ref) ast.Expr { n.Mangle = "_C2func_" + n.Go f.Name["2"+r.Name.Go] = n } - expr = ast.NewIdent(n.Mangle) + expr = getNewIdent(n.Mangle) r.Name = n break } @@ -1484,7 +1500,7 @@ func (p *Package) rewriteName(f *File, r *Ref) ast.Expr { // issue 7757. expr = &ast.CallExpr{ Fun: &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"}, - Args: []ast.Expr{ast.NewIdent(name.Mangle)}, + Args: []ast.Expr{getNewIdent(name.Mangle)}, } case "type": // Okay - might be new(T) @@ -1566,9 +1582,17 @@ func (p *Package) gccMachine() []string { case "s390x": return []string{"-m64"} case "mips64", "mips64le": - return []string{"-mabi=64"} + if gomips64 == "hardfloat" { + return []string{"-mabi=64", "-mhard-float"} + } else if gomips64 == "softfloat" { + return []string{"-mabi=64", "-msoft-float"} + } case "mips", "mipsle": - return []string{"-mabi=32"} + if gomips == "hardfloat" { + return []string{"-mabi=32", "-mfp32", "-mhard-float", "-mno-odd-spreg"} + } else if gomips == "softfloat" { + return []string{"-mabi=32", "-msoft-float"} + } } return nil } diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index c1116e28ecd..03a662e689d 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -17,9 +17,11 @@ import ( "go/ast" "go/printer" "go/token" + "internal/buildcfg" "io" "io/ioutil" "os" + "os/exec" "path/filepath" "reflect" "runtime" @@ -245,7 +247,7 @@ var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code") var trimpath = flag.String("trimpath", "", "applies supplied rewrites or trims prefixes to recorded source file paths") -var goarch, goos string +var goarch, goos, gomips, gomips64 string func main() { objabi.AddVersionFlag() // -V @@ -302,6 +304,14 @@ func main() { p := newPackage(args[:i]) + // We need a C compiler to be available. Check this. + gccName := p.gccBaseCmd()[0] + _, err := exec.LookPath(gccName) + if err != nil { + fatalf("C compiler %q not found: %v", gccName, err) + os.Exit(2) + } + // Record CGO_LDFLAGS from the environment for external linking. if ldflags := os.Getenv("CGO_LDFLAGS"); ldflags != "" { args, err := splitQuoted(ldflags) @@ -405,6 +415,9 @@ func newPackage(args []string) *Package { if s := os.Getenv("GOOS"); s != "" { goos = s } + buildcfg.Check() + gomips = buildcfg.GOMIPS + gomips64 = buildcfg.GOMIPS64 ptrSize := ptrSizeMap[goarch] if ptrSize == 0 { fatalf("unknown ptrSize for $GOARCH %q", goarch) diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index 8e83f02202f..8c31d5b7941 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -1021,11 +1021,25 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { } fmt.Fprintf(fgcc, "}\n") + // In internal linking mode, the Go linker sees both + // the C wrapper written above and the Go wrapper it + // references. 
Hence, export the C wrapper (e.g., + // if we're building a shared object). The Go linker + // will resolve the C wrapper's reference to the Go + // wrapper without a separate export. + fmt.Fprintf(fgo2, "//go:cgo_export_dynamic %s\n", exp.ExpName) + // cgo_export_static refers to a symbol by its linker + // name, so set the linker name of the Go wrapper. + fmt.Fprintf(fgo2, "//go:linkname _cgoexp%s_%s _cgoexp%s_%s\n", cPrefix, exp.ExpName, cPrefix, exp.ExpName) + // In external linking mode, the Go linker sees the Go + // wrapper, but not the C wrapper. For this case, + // export the Go wrapper so the host linker can + // resolve the reference from the C wrapper to the Go + // wrapper. + fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName) + // Build the wrapper function compiled by cmd/compile. // This unpacks the argument struct above and calls the Go function. - fmt.Fprintf(fgo2, "//go:cgo_export_dynamic %s\n", exp.ExpName) - fmt.Fprintf(fgo2, "//go:linkname _cgoexp%s_%s _cgoexp%s_%s\n", cPrefix, exp.ExpName, cPrefix, exp.ExpName) - fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName) fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a *%s) {\n", cPrefix, exp.ExpName, gotype) fmt.Fprintf(fm, "int _cgoexp%s_%s;\n", cPrefix, exp.ExpName) @@ -1717,8 +1731,12 @@ typedef struct __go_open_array { struct __go_string __go_byte_array_to_string(const void* p, intgo len); struct __go_open_array __go_string_to_byte_array (struct __go_string str); +extern void runtime_throw(const char *); + const char *_cgoPREFIX_Cfunc_CString(struct __go_string s) { char *p = malloc(s.__length+1); + if(p == NULL) + runtime_throw("runtime: C malloc failed"); memmove(p, s.__data, s.__length); p[s.__length] = 0; return p; @@ -1726,6 +1744,8 @@ const char *_cgoPREFIX_Cfunc_CString(struct __go_string s) { void *_cgoPREFIX_Cfunc_CBytes(struct __go_open_array b) { char *p = malloc(b.__count); + if(p == NULL) + runtime_throw("runtime: C malloc failed"); memmove(p, b.__values, b.__count); return p; } @@ -1744,14 +1764,13 @@ Slice _cgoPREFIX_Cfunc_GoBytes(char *p, int32_t n) { return __go_string_to_byte_array(s); } -extern void runtime_throw(const char *); void *_cgoPREFIX_Cfunc__CMalloc(size_t n) { - void *p = malloc(n); - if(p == NULL && n == 0) - p = malloc(1); - if(p == NULL) - runtime_throw("runtime: C malloc failed"); - return p; + void *p = malloc(n); + if(p == NULL && n == 0) + p = malloc(1); + if(p == NULL) + runtime_throw("runtime: C malloc failed"); + return p; } struct __go_type_descriptor; diff --git a/src/cmd/compile/internal-abi.md b/src/cmd/compile/abi-internal.md similarity index 94% rename from src/cmd/compile/internal-abi.md rename to src/cmd/compile/abi-internal.md index f4ef2cc8694..f901e707be3 100644 --- a/src/cmd/compile/internal-abi.md +++ b/src/cmd/compile/abi-internal.md @@ -153,6 +153,7 @@ Assigning a receiver, argument, or result V of underlying type T works as follows: 1. Remember I and FP. +1. If T has zero size, add T to the stack sequence S and return. 1. Try to register-assign V. 1. If step 2 failed, reset I and FP to the values from step 1, add T to the stack sequence S, and assign V to this field in S. @@ -295,6 +296,15 @@ An architecture may still define register meanings that aren’t compatible with ABI0, but these differences should be easy to account for in the compiler. +The assignment algorithm assigns zero-sized values to the stack +(assignment step 2) in order to support ABI0-equivalence.
+While these values take no space themselves, they do result in +alignment padding on the stack in ABI0. +Without this step, the internal ABI would register-assign zero-sized +values even on architectures that provide no argument registers +because they don't consume any registers, and hence not add alignment +padding to the stack. + The algorithm reserves spill space for arguments in the caller’s frame so that the compiler can generate a stack growth path that spills into this reserved space. @@ -391,19 +401,16 @@ without corrupting arguments or results. Special-purpose registers are as follows: -| Register | Call meaning | Body meaning | +| Register | Call meaning | Return meaning | Body meaning | -| --- | --- | --- | +| --- | --- | --- | --- | -| RSP | Stack pointer | Fixed | -| RBP | Frame pointer | Fixed | -| RDX | Closure context pointer | Scratch | -| R12 | None | Scratch | -| R13 | None | Scratch | -| R14 | Current goroutine | Scratch | -| R15 | GOT reference temporary | Fixed if dynlink | -| X15 | Zero value | Fixed | - -TODO: We may start with the existing TLS-based g and move to R14 -later. +| RSP | Stack pointer | Same | Same | +| RBP | Frame pointer | Same | Same | +| RDX | Closure context pointer | Scratch | Scratch | +| R12 | Scratch | Scratch | Scratch | +| R13 | Scratch | Scratch | Scratch | +| R14 | Current goroutine | Same | Same | +| R15 | GOT reference temporary if dynlink | Same | Same | +| X15 | Zero value | Same | Scratch | *Rationale*: These register meanings are compatible with Go’s stack-based calling convention except for R14 and X15, which will have @@ -417,10 +424,21 @@ While this adds one byte to every function prologue, it is hardly ever accessed outside the function prologue and we expect making more single-byte registers available to be a net win. +*Rationale*: We could allow R14 (the current goroutine pointer) to be +a scratch register in function bodies because it can always be +restored from TLS on amd64. +However, we designate it as a fixed register for simplicity and for +consistency with other architectures that may not have a copy of the +current goroutine pointer in TLS. + *Rationale*: We designate X15 as a fixed zero register because functions often have to bulk zero their stack frames, and this is more efficient with a designated zero register. +*Implementation note*: Registers with fixed meaning at calls but not +in function bodies must be initialized by "injected" calls such as +signal-based panics. + #### Stack layout The stack pointer, RSP, grows down and is always aligned to 8 bytes. @@ -455,13 +473,12 @@ The arithmetic status flags are treated like scratch registers and not preserved across calls. All other bits in RFLAGS are system flags. -The CPU is always in MMX technology state (not x87 mode). +At function calls and returns, the CPU is in x87 mode (not MMX +technology mode). *Rationale*: Go on amd64 uses the XMM registers and never uses the x87 -registers, so it makes sense to assume the CPU is in MMX mode. -Otherwise, any function that used the XMM registers would have to -execute an EMMS instruction before calling another function or -returning (this is the case in the SysV ABI). +*Rationale*: Go on amd64 does not use either the x87 registers or MMX +registers. Hence, we follow the SysV platform conventions in order to +simplify transitions to and from the C ABI.
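[Editor's illustration, not part of this CL: the sketch below shows what the register conventions tabulated above would mean for hand-written amd64 assembly. It assumes the internal ABI passes the first integer argument in RAX (spelled AX in Go assembler syntax) and that the toolchain permits defining an ABIInternal symbol here, which in practice is restricted; the function name and the <ABIInternal> selector are illustrative assumptions, not content from this change.]

// func zero16(p *byte): store 16 zero bytes at *p.
TEXT ·zero16<ABIInternal>(SB), NOSPLIT, $0-8
	// Assumption: p arrives in AX under the internal ABI, so no stack
	// load is needed. X15 is the designated zero register, so no zero
	// constant has to be materialized; the function must simply not
	// clobber X15 (or R14, the current goroutine pointer) before RET.
	MOVUPS	X15, (AX)	// store 16 zero bytes at *p
	RET

[The point of the convention is visible here: bulk zeroing costs one store per 16 bytes, with no register set-up.]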
At calls, the MXCSR control bits are always set as follows: diff --git a/src/cmd/compile/doc.go b/src/cmd/compile/doc.go index 46d47220866..b68ef274f37 100644 --- a/src/cmd/compile/doc.go +++ b/src/cmd/compile/doc.go @@ -83,7 +83,8 @@ Flags: Without this flag, the -o output is a combination of both linker and compiler input. -m - Print optimization decisions. + Print optimization decisions. Higher values or repetition + produce more detail. -memprofile file Write memory profile for the compilation to file. -memprofilerate rate diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go index e935821802d..cb8e9d7b0fe 100644 --- a/src/cmd/compile/internal/abi/abiutils.go +++ b/src/cmd/compile/internal/abi/abiutils.go @@ -5,6 +5,8 @@ package abi import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -27,9 +29,15 @@ type ABIParamResultInfo struct { outparams []ABIParamAssignment offsetToSpillArea int64 spillAreaSize int64 + inRegistersUsed int + outRegistersUsed int config *ABIConfig // to enable String() method } +func (a *ABIParamResultInfo) Config() *ABIConfig { + return a.config +} + func (a *ABIParamResultInfo) InParams() []ABIParamAssignment { return a.inparams } @@ -38,12 +46,20 @@ func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment { return a.outparams } -func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment { - return a.inparams[i] +func (a *ABIParamResultInfo) InRegistersUsed() int { + return a.inRegistersUsed } -func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment { - return a.outparams[i] +func (a *ABIParamResultInfo) OutRegistersUsed() int { + return a.outRegistersUsed +} + +func (a *ABIParamResultInfo) InParam(i int) *ABIParamAssignment { + return &a.inparams[i] +} + +func (a *ABIParamResultInfo) OutParam(i int) *ABIParamAssignment { + return &a.outparams[i] } func (a *ABIParamResultInfo) SpillAreaOffset() int64 { @@ -54,6 +70,14 @@ func (a *ABIParamResultInfo) SpillAreaSize() int64 { return a.spillAreaSize } +// ArgWidth returns the amount of stack needed for all the inputs +// and outputs of a function or method, including ABI-defined parameter +// slots and ABI-defined spill slots for register-resident parameters. +// The name is inherited from (*Type).ArgWidth(), which it replaces. +func (a *ABIParamResultInfo) ArgWidth() int64 { + return a.spillAreaSize + a.offsetToSpillArea - a.config.LocalsOffset() +} + // RegIndex stores the index into the set of machine registers used by // the ABI on a specific architecture for parameter passing. RegIndex // values 0 through N-1 (where N is the number of integer registers @@ -68,10 +92,11 @@ type RegIndex uint8 // ABIParamAssignment holds information about how a specific param or // result will be passed: in registers (in which case 'Registers' is // populated) or on the stack (in which case 'Offset' is set to a -// non-negative stack offset. The values in 'Registers' are indices (as -// described above), not architected registers. +// non-negative stack offset. The values in 'Registers' are indices +// (as described above), not architected registers. type ABIParamAssignment struct { Type *types.Type + Name types.Object // should always be *ir.Name, used to match with a particular ssa.OpArg. Registers []RegIndex offset int32 } @@ -80,20 +105,147 @@ type ABIParamAssignment struct { // This will panic if "a" describes a register-allocated parameter. 
func (a *ABIParamAssignment) Offset() int32 { if len(a.Registers) > 0 { - panic("Register allocated parameters have no offset") + base.Fatalf("register allocated parameters have no offset") } return a.offset } -// SpillOffset returns the offset *within the spill area* for the parameter that "a" describes. -// Registers will be spilled here; if a memory home is needed (for a pointer method e.g.) -// then that will be the address. -// This will panic if "a" describes a stack-allocated parameter. -func (a *ABIParamAssignment) SpillOffset() int32 { - if len(a.Registers) == 0 { - panic("Stack-allocated parameters have no spill offset") +// RegisterTypes returns a slice of the types of the registers +// corresponding to a slice of parameters. The returned slice +// has capacity for one more, likely a memory type. +func RegisterTypes(apa []ABIParamAssignment) []*types.Type { + rcount := 0 + for _, pa := range apa { + rcount += len(pa.Registers) } - return a.offset + if rcount == 0 { + // Note that this catches top-level struct{} and [0]Foo, which are stack allocated. + return make([]*types.Type, 0, 1) + } + rts := make([]*types.Type, 0, rcount+1) + for _, pa := range apa { + if len(pa.Registers) == 0 { + continue + } + rts = appendParamTypes(rts, pa.Type) + } + return rts +} + +func (pa *ABIParamAssignment) RegisterTypesAndOffsets() ([]*types.Type, []int64) { + l := len(pa.Registers) + if l == 0 { + return nil, nil + } + typs := make([]*types.Type, 0, l) + offs := make([]int64, 0, l) + offs, _ = appendParamOffsets(offs, 0, pa.Type) + return appendParamTypes(typs, pa.Type), offs +} + +func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type { + w := t.Width + if w == 0 { + return rts + } + if t.IsScalar() || t.IsPtrShaped() { + if t.IsComplex() { + c := types.FloatForComplex(t) + return append(rts, c, c) + } else { + if int(t.Size()) <= types.RegSize { + return append(rts, t) + } + // assume 64bit int on 32-bit machine + // TODO endianness? Should high-order (sign bits) word come first? + if t.IsSigned() { + rts = append(rts, types.Types[types.TINT32]) + } else { + rts = append(rts, types.Types[types.TUINT32]) + } + return append(rts, types.Types[types.TUINT32]) + } + } else { + typ := t.Kind() + switch typ { + case types.TARRAY: + for i := int64(0); i < t.NumElem(); i++ { // 0 gets no registers, plus future-proofing. + rts = appendParamTypes(rts, t.Elem()) + } + case types.TSTRUCT: + for _, f := range t.FieldSlice() { + if f.Type.Size() > 0 { // embedded zero-width types receive no registers + rts = appendParamTypes(rts, f.Type) + } + } + case types.TSLICE: + return appendParamTypes(rts, synthSlice) + case types.TSTRING: + return appendParamTypes(rts, synthString) + case types.TINTER: + return appendParamTypes(rts, synthIface) + } + } + return rts +} + +// appendParamOffsets appends the offset(s) of type t, starting from "at", +// to input offsets, and returns the longer slice and the next unused offset. 
+func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int64) { + at = align(at, t) + w := t.Width + if w == 0 { + return offsets, at + } + if t.IsScalar() || t.IsPtrShaped() { + if t.IsComplex() || int(t.Width) > types.RegSize { // complex and *int64 on 32-bit + s := w / 2 + return append(offsets, at, at+s), at + w + } else { + return append(offsets, at), at + w + } + } else { + typ := t.Kind() + switch typ { + case types.TARRAY: + for i := int64(0); i < t.NumElem(); i++ { + offsets, at = appendParamOffsets(offsets, at, t.Elem()) + } + case types.TSTRUCT: + for i, f := range t.FieldSlice() { + offsets, at = appendParamOffsets(offsets, at, f.Type) + if f.Type.Width == 0 && i == t.NumFields()-1 { + at++ // last field has zero width + } + } + at = align(at, t) // type size is rounded up to its alignment + case types.TSLICE: + return appendParamOffsets(offsets, at, synthSlice) + case types.TSTRING: + return appendParamOffsets(offsets, at, synthString) + case types.TINTER: + return appendParamOffsets(offsets, at, synthIface) + } + } + return offsets, at +} + +// FrameOffset returns the frame-pointer-relative location that a function +// would spill its input or output parameter to, if such a spill slot exists. +// If there is none defined (e.g., register-allocated outputs) it panics. +// For register-allocated inputs that is their spill offset reserved for morestack; +// for stack-allocated inputs and outputs, that is their location on the stack. +// (In a future version of the ABI, register-resident inputs may lose their defined +// spill area to help reduce stack sizes.) +func (a *ABIParamAssignment) FrameOffset(i *ABIParamResultInfo) int64 { + if a.offset == -1 { + base.Fatalf("function parameter has no ABI-defined frame-pointer offset") + } + if len(a.Registers) == 0 { // passed on stack + return int64(a.offset) - i.config.LocalsOffset() + } + // spill area for registers + return int64(a.offset) + i.SpillAreaOffset() - i.config.LocalsOffset() } // RegAmounts holds a specified number of integer/float registers. @@ -106,94 +258,233 @@ type RegAmounts struct { // by the ABI rules for parameter passing and result returning. type ABIConfig struct { // Do we need anything more than this? + offsetForLocals int64 // e.g., obj.(*Link).FixedFrameSize() -- extra linkage information on some architectures. regAmounts RegAmounts regsForTypeCache map[*types.Type]int } // NewABIConfig returns a new ABI configuration for an architecture with // iRegsCount integer/pointer registers and fRegsCount floating point registers. -func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig { - return &ABIConfig{regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)} +func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64) *ABIConfig { + return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)} +} + +// Copy returns a copy of an ABIConfig for use in a function's compilation so that access to the cache does not need to be protected with a mutex. +func (a *ABIConfig) Copy() *ABIConfig { + b := *a + b.regsForTypeCache = make(map[*types.Type]int) + return &b +} + +// LocalsOffset returns the architecture-dependent offset from SP for args and results. 
+// In theory this is only used for debugging; it ought to already be incorporated into +// results from the ABI-related methods +func (a *ABIConfig) LocalsOffset() int64 { + return a.offsetForLocals +} + +// FloatIndexFor translates r into an index in the floating point parameter +// registers. If the result is negative, the input index was actually for the +// integer parameter registers. +func (a *ABIConfig) FloatIndexFor(r RegIndex) int64 { + return int64(r) - int64(a.regAmounts.intRegs) } // NumParamRegs returns the number of parameter registers used for a given type, // without regard for the number available. func (a *ABIConfig) NumParamRegs(t *types.Type) int { + var n int if n, ok := a.regsForTypeCache[t]; ok { return n } if t.IsScalar() || t.IsPtrShaped() { - var n int if t.IsComplex() { n = 2 } else { n = (int(t.Size()) + types.RegSize - 1) / types.RegSize } - a.regsForTypeCache[t] = n - return n - } - typ := t.Kind() - n := 0 - switch typ { - case types.TARRAY: - n = a.NumParamRegs(t.Elem()) * int(t.NumElem()) - case types.TSTRUCT: - for _, f := range t.FieldSlice() { - n += a.NumParamRegs(f.Type) + } else { + typ := t.Kind() + switch typ { + case types.TARRAY: + n = a.NumParamRegs(t.Elem()) * int(t.NumElem()) + case types.TSTRUCT: + for _, f := range t.FieldSlice() { + n += a.NumParamRegs(f.Type) + } + case types.TSLICE: + n = a.NumParamRegs(synthSlice) + case types.TSTRING: + n = a.NumParamRegs(synthString) + case types.TINTER: + n = a.NumParamRegs(synthIface) } - case types.TSLICE: - n = a.NumParamRegs(synthSlice) - case types.TSTRING: - n = a.NumParamRegs(synthString) - case types.TINTER: - n = a.NumParamRegs(synthIface) } a.regsForTypeCache[t] = n + return n } -// ABIAnalyze takes a function type 't' and an ABI rules description +// preAllocateParams gets the slice sizes right for inputs and outputs. +func (a *ABIParamResultInfo) preAllocateParams(hasRcvr bool, nIns, nOuts int) { + if hasRcvr { + nIns++ + } + a.inparams = make([]ABIParamAssignment, 0, nIns) + a.outparams = make([]ABIParamAssignment, 0, nOuts) +} + +// ABIAnalyzeTypes takes an optional receiver type, arrays of ins and outs, and returns an ABIParamResultInfo, +// based on the given configuration. This is the same result computed by config.ABIAnalyze applied to the +// corresponding method/function type, except that all the embedded parameter names are nil. +// This is intended for use by ssagen/ssa.go:(*state).rtcall, for runtime functions that lack a parsed function type. +func (config *ABIConfig) ABIAnalyzeTypes(rcvr *types.Type, ins, outs []*types.Type) *ABIParamResultInfo { + setup() + s := assignState{ + stackOffset: config.offsetForLocals, + rTotal: config.regAmounts, + } + result := &ABIParamResultInfo{config: config} + result.preAllocateParams(rcvr != nil, len(ins), len(outs)) + + // Receiver + if rcvr != nil { + result.inparams = append(result.inparams, + s.assignParamOrReturn(rcvr, nil, false)) + } + + // Inputs + for _, t := range ins { + result.inparams = append(result.inparams, + s.assignParamOrReturn(t, nil, false)) + } + s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize)) + result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs + + // Outputs + s.rUsed = RegAmounts{} + for _, t := range outs { + result.outparams = append(result.outparams, s.assignParamOrReturn(t, nil, true)) + } + // The spill area is at a register-aligned offset and its size is rounded up to a register alignment. + // TODO in theory could align offset only to minimum required by spilled data types. 
+ result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize) + result.spillAreaSize = alignTo(s.spillOffset, types.RegSize) + result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs + + return result +} + +// ABIAnalyzeFuncType takes a function type 'ft' and an ABI rules description // 'config' and analyzes the function to determine how its parameters // and results will be passed (in registers or on the stack), returning // an ABIParamResultInfo object that holds the results of the analysis. -func (config *ABIConfig) ABIAnalyze(t *types.Type) ABIParamResultInfo { +func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Func) *ABIParamResultInfo { setup() s := assignState{ - rTotal: config.regAmounts, + stackOffset: config.offsetForLocals, + rTotal: config.regAmounts, } - result := ABIParamResultInfo{config: config} + result := &ABIParamResultInfo{config: config} + result.preAllocateParams(ft.Receiver != nil, ft.Params.NumFields(), ft.Results.NumFields()) // Receiver - ft := t.FuncType() - if t.NumRecvs() != 0 { - rfsl := ft.Receiver.FieldSlice() + // TODO(register args) ? seems like "struct" and "fields" is not right anymore for describing function parameters + if ft.Receiver != nil && ft.Receiver.NumFields() != 0 { + r := ft.Receiver.FieldSlice()[0] result.inparams = append(result.inparams, - s.assignParamOrReturn(rfsl[0].Type, false)) + s.assignParamOrReturn(r.Type, r.Nname, false)) } // Inputs ifsl := ft.Params.FieldSlice() for _, f := range ifsl { result.inparams = append(result.inparams, - s.assignParamOrReturn(f.Type, false)) + s.assignParamOrReturn(f.Type, f.Nname, false)) } s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize)) + result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs // Outputs s.rUsed = RegAmounts{} ofsl := ft.Results.FieldSlice() for _, f := range ofsl { - result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, true)) + result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, f.Nname, true)) } // The spill area is at a register-aligned offset and its size is rounded up to a register alignment. // TODO in theory could align offset only to minimum required by spilled data types. result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize) result.spillAreaSize = alignTo(s.spillOffset, types.RegSize) - + result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs return result } +// ABIAnalyze returns the same result as ABIAnalyzeFuncType, but also +// updates the offsets of all the receiver, input, and output fields. +// If setNname is true, it also sets the FrameOffset of the Nname for +// the field(s); this is for use when compiling a function and figuring out +// spill locations. Doing this for callers can cause races for register +// outputs because their frame location transitions from BOGUS_FUNARG_OFFSET +// to zero to an as-if-AUTO offset that has no use for callers. 
+func (config *ABIConfig) ABIAnalyze(t *types.Type, setNname bool) *ABIParamResultInfo { + ft := t.FuncType() + result := config.ABIAnalyzeFuncType(ft) + + // Fill in the frame offsets for receiver, inputs, results + k := 0 + if t.NumRecvs() != 0 { + config.updateOffset(result, ft.Receiver.FieldSlice()[0], result.inparams[0], false, setNname) + k++ + } + for i, f := range ft.Params.FieldSlice() { + config.updateOffset(result, f, result.inparams[k+i], false, setNname) + } + for i, f := range ft.Results.FieldSlice() { + config.updateOffset(result, f, result.outparams[i], true, setNname) + } + return result +} + +// parameterUpdateMu protects the Offset field of function/method parameters (a subset of structure Fields) +var parameterUpdateMu sync.Mutex + +// FieldOffsetOf returns a concurrency-safe version of f.Offset +func FieldOffsetOf(f *types.Field) int64 { + parameterUpdateMu.Lock() + defer parameterUpdateMu.Unlock() + return f.Offset +} + +func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isReturn, setNname bool) { + // Everything except return values in registers has either a frame home (if not in a register) or a frame spill location. + if !isReturn || len(a.Registers) == 0 { + // The type frame offset DOES NOT show effects of minimum frame size. + // Getting this wrong breaks stackmaps, see liveness/plive.go:WriteFuncMap and typebits/typebits.go:Set + parameterUpdateMu.Lock() + defer parameterUpdateMu.Unlock() + off := a.FrameOffset(result) + fOffset := f.Offset + if fOffset == types.BOGUS_FUNARG_OFFSET { + // Set the Offset the first time. After that, we may recompute it, but it should never change. + f.Offset = off + if f.Nname != nil { + // always set it in this case. + f.Nname.(*ir.Name).SetFrameOffset(off) + f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false) + } + } else if fOffset != off { + base.Fatalf("offset for %s at %s changed from %d to %d", f.Sym.Name, base.FmtPos(f.Pos), fOffset, off) + } + } else { + if setNname && f.Nname != nil { + fname := f.Nname.(*ir.Name) + fname.SetIsOutputParamInRegisters(true) + fname.SetFrameOffset(0) + } + } +} + //...................................................................... // // Non-public portions. @@ -208,9 +499,9 @@ func (c *RegAmounts) regString(r RegIndex) string { return fmt.Sprintf("%d", r) } -// toString method renders an ABIParamAssignment in human-readable +// ToString method renders an ABIParamAssignment in human-readable // form, suitable for debugging or unit testing. -func (ri *ABIParamAssignment) toString(config *ABIConfig) string { +func (ri *ABIParamAssignment) ToString(config *ABIConfig, extra bool) string { regs := "R{" offname := "spilloffset" // offset is for spill for register(s) if len(ri.Registers) == 0 { @@ -218,19 +509,25 @@ func (ri *ABIParamAssignment) toString(config *ABIConfig) string { regs += " " + config.regAmounts.regString(r) + if extra { + regs += fmt.Sprintf("(%d)", r) + } + } + if extra { + regs += fmt.Sprintf(" | #I=%d, #F=%d", config.regAmounts.intRegs, config.regAmounts.floatRegs) } return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type) } -// toString method renders an ABIParamResultInfo in human-readable +// String method renders an ABIParamResultInfo in human-readable // form, suitable for debugging or unit testing. 
func (ri *ABIParamResultInfo) String() string { res := "" for k, p := range ri.inparams { - res += fmt.Sprintf("IN %d: %s\n", k, p.toString(ri.config)) + res += fmt.Sprintf("IN %d: %s\n", k, p.ToString(ri.config, false)) } for k, r := range ri.outparams { - res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config)) + res += fmt.Sprintf("OUT %d: %s\n", k, r.ToString(ri.config, false)) } res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d", ri.offsetToSpillArea, ri.spillAreaSize) @@ -268,31 +565,61 @@ func (state *assignState) stackSlot(t *types.Type) int64 { return rv } -// allocateRegs returns a set of register indices for a parameter or result +// allocateRegs returns an ordered list of register indices for a parameter or result // that we've just determined to be register-assignable. The number of registers // needed is assumed to be stored in state.pUsed. -func (state *assignState) allocateRegs() []RegIndex { - regs := []RegIndex{} - - // integer - for r := state.rUsed.intRegs; r < state.rUsed.intRegs+state.pUsed.intRegs; r++ { - regs = append(regs, RegIndex(r)) +func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegIndex { + if t.Width == 0 { + return regs } - state.rUsed.intRegs += state.pUsed.intRegs - - // floating - for r := state.rUsed.floatRegs; r < state.rUsed.floatRegs+state.pUsed.floatRegs; r++ { - regs = append(regs, RegIndex(r+state.rTotal.intRegs)) + ri := state.rUsed.intRegs + rf := state.rUsed.floatRegs + if t.IsScalar() || t.IsPtrShaped() { + if t.IsComplex() { + regs = append(regs, RegIndex(rf+state.rTotal.intRegs), RegIndex(rf+1+state.rTotal.intRegs)) + rf += 2 + } else if t.IsFloat() { + regs = append(regs, RegIndex(rf+state.rTotal.intRegs)) + rf += 1 + } else { + n := (int(t.Size()) + types.RegSize - 1) / types.RegSize + for i := 0; i < n; i++ { // looking ahead to really big integers + regs = append(regs, RegIndex(ri)) + ri += 1 + } + } + state.rUsed.intRegs = ri + state.rUsed.floatRegs = rf + return regs + } else { + typ := t.Kind() + switch typ { + case types.TARRAY: + for i := int64(0); i < t.NumElem(); i++ { + regs = state.allocateRegs(regs, t.Elem()) + } + return regs + case types.TSTRUCT: + for _, f := range t.FieldSlice() { + regs = state.allocateRegs(regs, f.Type) + } + return regs + case types.TSLICE: + return state.allocateRegs(regs, synthSlice) + case types.TSTRING: + return state.allocateRegs(regs, synthString) + case types.TINTER: + return state.allocateRegs(regs, synthIface) + } } - state.rUsed.floatRegs += state.pUsed.floatRegs - - return regs + base.Fatalf("was not expecting type %s", t) + panic("unreachable") } // regAllocate creates a register ABIParamAssignment object for a param // or result with the specified type, as a final step (this assumes // that all of the safety/suitability analysis is complete). -func (state *assignState) regAllocate(t *types.Type, isReturn bool) ABIParamAssignment { +func (state *assignState) regAllocate(t *types.Type, name types.Object, isReturn bool) ABIParamAssignment { spillLoc := int64(-1) if !isReturn { // Spill for register-resident t must be aligned for storage of a t. 
@@ -301,7 +628,8 @@ func (state *assignState) regAllocate(t *types.Type, isReturn bool) ABIParamAssi } return ABIParamAssignment{ Type: t, - Registers: state.allocateRegs(), + Name: name, + Registers: state.allocateRegs([]RegIndex{}, t), offset: int32(spillLoc), } } @@ -309,9 +637,10 @@ func (state *assignState) regAllocate(t *types.Type, isReturn bool) ABIParamAssi // stackAllocate creates a stack memory ABIParamAssignment object for // a param or result with the specified type, as a final step (this // assumes that all of the safety/suitability analysis is complete). -func (state *assignState) stackAllocate(t *types.Type) ABIParamAssignment { +func (state *assignState) stackAllocate(t *types.Type, name types.Object) ABIParamAssignment { return ABIParamAssignment{ Type: t, + Name: name, offset: int32(state.stackSlot(t)), } } @@ -439,23 +768,73 @@ func (state *assignState) regassign(pt *types.Type) bool { case types.TINTER: return state.regassignStruct(synthIface) default: - panic("not expected") + base.Fatalf("not expected") + panic("unreachable") } } // assignParamOrReturn processes a given receiver, param, or result -// of type 'pt' to determine whether it can be register assigned. +// of field f to determine whether it can be register assigned. // The result of the analysis is recorded in the result // ABIParamResultInfo held in 'state'. -func (state *assignState) assignParamOrReturn(pt *types.Type, isReturn bool) ABIParamAssignment { +func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, isReturn bool) ABIParamAssignment { state.pUsed = RegAmounts{} if pt.Width == types.BADWIDTH { - panic("should never happen") + base.Fatalf("should never happen") + panic("unreachable") } else if pt.Width == 0 { - return state.stackAllocate(pt) + return state.stackAllocate(pt, n) } else if state.regassign(pt) { - return state.regAllocate(pt, isReturn) + return state.regAllocate(pt, n, isReturn) } else { - return state.stackAllocate(pt) + return state.stackAllocate(pt, n) } } + +// ComputePadding returns a list of "post element" padding values in +// the case where we have a structure being passed in registers. Given +// a param assignment corresponding to a struct, it returns a list +// containing padding values for each field, e.g. the Kth element in +// the list is the amount of padding between field K and the following +// field. For things that are not structs (or structs without padding) +// it returns a list of zeros. Example: +// +// type small struct { +// x uint16 +// y uint8 +// z int32 +// w int32 +// } +// +// For this struct we would return a list [0, 1, 0, 0], meaning that +// we have one byte of padding after the second field, and no bytes of +// padding after any of the other fields. Input parameter "storage" +// must have enough capacity to accommodate padding elements for +// the architected register set in question.
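The padding rule in this comment is easy to cross-check with plain reflection. The standalone sketch below (not the compiler's implementation, which follows and works from its own type representation; unlike ComputePadding it also counts any trailing padding at the end of the struct) reproduces the [0 1 0 0] answer for the example type:

package main

import (
	"fmt"
	"reflect"
)

type small struct {
	x uint16
	y uint8
	z int32
	w int32
}

// fieldPadding returns, for each field, the bytes of padding that
// follow it before the next field (or the end of the struct).
func fieldPadding(t reflect.Type) []uint64 {
	pad := make([]uint64, t.NumField())
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		end := f.Offset + f.Type.Size()
		next := t.Size()
		if i+1 < t.NumField() {
			next = t.Field(i + 1).Offset
		}
		pad[i] = uint64(next - end)
	}
	return pad
}

func main() {
	fmt.Println(fieldPadding(reflect.TypeOf(small{}))) // [0 1 0 0] on 64-bit targets
}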
+func (pa *ABIParamAssignment) ComputePadding(storage []uint64) []uint64 { + nr := len(pa.Registers) + padding := storage[:nr] + for i := 0; i < nr; i++ { + padding[i] = 0 + } + if pa.Type.Kind() != types.TSTRUCT || nr == 0 { + return padding + } + types := make([]*types.Type, 0, nr) + types = appendParamTypes(types, pa.Type) + if len(types) != nr { + panic("internal error") + } + off := int64(0) + for idx, t := range types { + ts := t.Size() + off += int64(ts) + if idx < len(types)-1 { + noff := align(off, types[idx+1]) + if noff != off { + padding[idx] = uint64(noff - off) + } + } + } + return padding +} diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go index ce1c402902f..2785aa03368 100644 --- a/src/cmd/compile/internal/amd64/galign.go +++ b/src/cmd/compile/internal/amd64/galign.go @@ -23,4 +23,6 @@ func Init(arch *ssagen.ArchInfo) { arch.SSAMarkMoves = ssaMarkMoves arch.SSAGenValue = ssaGenValue arch.SSAGenBlock = ssaGenBlock + arch.LoadRegResults = loadRegResults + arch.SpillArgReg = spillArgReg } diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go index aefdb14a69b..1484ad5404b 100644 --- a/src/cmd/compile/internal/amd64/ggen.go +++ b/src/cmd/compile/internal/amd64/ggen.go @@ -11,11 +11,11 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/x86" - "cmd/internal/objabi" + "internal/buildcfg" ) // no floating point in note handlers on Plan 9 -var isPlan9 = objabi.GOOS == "plan9" +var isPlan9 = buildcfg.GOOS == "plan9" // DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ, // See runtime/mkduff.go. @@ -56,8 +56,8 @@ func dzDI(b int64) int64 { func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog { const ( - ax = 1 << iota - x0 + r13 = 1 << iota // if R13 is already zeroed. + x15 // if X15 is already zeroed. Note: in new ABI, X15 is always zero. ) if cnt == 0 { @@ -69,56 +69,80 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj. 
if cnt%int64(types.PtrSize) != 0 { base.Fatalf("zerorange count not a multiple of widthptr %d", cnt) } - if *state&ax == 0 { - p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) - *state |= ax + if *state&r13 == 0 { + p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_R13, 0) + *state |= r13 } - p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off) + p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off) off += int64(types.PtrSize) cnt -= int64(types.PtrSize) } if cnt == 8 { - if *state&ax == 0 { - p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) - *state |= ax + if *state&r13 == 0 { + p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_R13, 0) + *state |= r13 } - p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off) } else if !isPlan9 && cnt <= int64(8*types.RegSize) { - if *state&x0 == 0 { - p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0) - *state |= x0 + if !buildcfg.Experiment.RegabiG && *state&x15 == 0 { + p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0) + *state |= x15 } for i := int64(0); i < cnt/16; i++ { - p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16) + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16) } if cnt%16 != 0 { - p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16)) + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16)) } } else if !isPlan9 && (cnt <= int64(128*types.RegSize)) { - if *state&x0 == 0 { - p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0) - *state |= x0 + if !buildcfg.Experiment.RegabiG && *state&x15 == 0 { + p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0) + *state |= x15 } + // Save DI to r12. With the amd64 Go register abi, DI can contain + // an incoming parameter, whereas R12 is always scratch. + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0) + // Emit duffzero call p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0) p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt)) p.To.Sym = ir.Syms.Duffzero - if cnt%16 != 0 { - p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8)) - } - } else { - if *state&ax == 0 { - p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) - *state |= ax + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8)) } + // Restore DI from r12 + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0) + } else { + // When the register ABI is in effect, at this point in the + // prolog we may have live values in all of RAX,RDI,RCX. Save + // them off to registers before the REPSTOSQ below, then + // restore. Note that R12 and R13 are always available as + // scratch regs; here we also use R15 (this is safe to do + // since there won't be any globals accessed in the prolog). + // See rewriteToUseGot() in obj6.go for more on r15 use. 
+ + // Save rax/rdi/rcx + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_REG, x86.REG_R13, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_CX, 0, obj.TYPE_REG, x86.REG_R15, 0) + + // Set up the REPSTOSQ and kick it off. + p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + + // Restore rax/rdi/rcx + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_REG, x86.REG_AX, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R15, 0, obj.TYPE_REG, x86.REG_CX, 0) + + // Record the fact that r13 is no longer zero. + *state &= ^uint32(r13) } return p diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 4938e4b0e35..ca5f36e7759 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -6,11 +6,13 @@ package amd64 import ( "fmt" + "internal/buildcfg" "math" "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" + "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" "cmd/compile/internal/types" @@ -200,10 +202,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[2].Reg()} p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()}) - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } + p.SetFrom3Reg(v.Args[1].Reg()) case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL: r := v.Reg() r1 := v.Args[0].Reg() @@ -253,11 +252,16 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ, ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ, ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } - opregreg(s, v.Op.Asm(), r, v.Args[1].Reg()) + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) + + case ssa.OpAMD64SHRDQ, ssa.OpAMD64SHLDQ: + p := s.Prog(v.Op.Asm()) + lo, hi, bits := v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg() + p.From.Type = obj.TYPE_REG + p.From.Reg = bits + p.To.Type = obj.TYPE_REG + p.To.Reg = lo + p.SetFrom3Reg(hi) case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU: // Arg[0] (the dividend) is in AX. @@ -400,20 +404,16 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // compute (x+y)/2 unsigned. // Do a 64-bit add, the overflow goes into the carry. // Shift right once and pull the carry back into the 63rd bit. 
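The ADDQ/RCRQ pair described in the comment just above computes the unsigned average in effectively 65 bits: the add's carry flag is rotated back in as the new top bit after the shift. The same trick in pure Go via math/bits (illustrative; the compiler emits the two instructions directly):

package main

import (
	"fmt"
	"math/bits"
)

// avg returns (x+y)/2 without overflow: the add's carry becomes the
// top bit of the shifted sum, just as RCRQ pulls the carry flag into
// bit 63.
func avg(x, y uint64) uint64 {
	sum, carry := bits.Add64(x, y, 0)
	return sum>>1 | carry<<63
}

func main() {
	const max = ^uint64(0)
	fmt.Println(avg(max, max) == max) // true: no overflow
	fmt.Println(avg(6, 8))            // 7
}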
- r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(x86.AADDQ) p.From.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() p.From.Reg = v.Args[1].Reg() p = s.Prog(x86.ARCRQ) p.From.Type = obj.TYPE_CONST p.From.Offset = 1 p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.OpAMD64ADDQcarry, ssa.OpAMD64ADCQ: r := v.Reg0() @@ -529,21 +529,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpAMD64CMOVQCS, ssa.OpAMD64CMOVLCS, ssa.OpAMD64CMOVWCS, ssa.OpAMD64CMOVQGTF, ssa.OpAMD64CMOVLGTF, ssa.OpAMD64CMOVWGTF, ssa.OpAMD64CMOVQGEF, ssa.OpAMD64CMOVLGEF, ssa.OpAMD64CMOVWGEF: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.OpAMD64CMOVQNEF, ssa.OpAMD64CMOVLNEF, ssa.OpAMD64CMOVWNEF: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } // Flag condition: ^ZERO || PARITY // Generate: // CMOV*NE SRC,DST @@ -552,7 +544,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() var q *obj.Prog if v.Op == ssa.OpAMD64CMOVQNEF { q = s.Prog(x86.ACMOVQPS) @@ -564,14 +556,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { q.From.Type = obj.TYPE_REG q.From.Reg = v.Args[1].Reg() q.To.Type = obj.TYPE_REG - q.To.Reg = r + q.To.Reg = v.Reg() case ssa.OpAMD64CMOVQEQF, ssa.OpAMD64CMOVLEQF, ssa.OpAMD64CMOVWEQF: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } - // Flag condition: ZERO && !PARITY // Generate: // MOV SRC,AX @@ -588,7 +575,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG - p.From.Reg = r + p.From.Reg = v.Reg() p.To.Type = obj.TYPE_REG p.To.Reg = x86.REG_AX var q *obj.Prog @@ -602,7 +589,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { q.From.Type = obj.TYPE_REG q.From.Reg = x86.REG_AX q.To.Type = obj.TYPE_REG - q.To.Reg = r + q.To.Reg = v.Reg() case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst: r := v.Reg() @@ -611,7 +598,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = r - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()}) + p.SetFrom3Reg(v.Args[0].Reg()) case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst, @@ -621,15 +608,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst, ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst, ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask: r := v.Reg() p := s.Prog(v.Op.Asm()) @@ -709,9 +692,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = 
v.Args[0].Reg() - ssagen.AddAux2(&p.From, v, sc.Off()) + ssagen.AddAux2(&p.From, v, sc.Off64()) p.To.Type = obj.TYPE_CONST - p.To.Offset = sc.Val() + p.To.Offset = sc.Val64() case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1: p := s.Prog(v.Op.Asm()) memIdx(&p.From, v) @@ -722,9 +705,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { sc := v.AuxValAndOff() p := s.Prog(v.Op.Asm()) memIdx(&p.From, v) - ssagen.AddAux2(&p.From, v, sc.Off()) + ssagen.AddAux2(&p.From, v, sc.Off64()) p.To.Type = obj.TYPE_CONST - p.To.Offset = sc.Val() + p.To.Offset = sc.Val64() case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: x := v.Reg() @@ -773,7 +756,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore, - ssa.OpAMD64BTCQmodify, ssa.OpAMD64BTCLmodify, ssa.OpAMD64BTRQmodify, ssa.OpAMD64BTRLmodify, ssa.OpAMD64BTSQmodify, ssa.OpAMD64BTSLmodify, ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify, ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify: p := s.Prog(v.Op.Asm()) @@ -796,7 +778,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.To, v) case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify: sc := v.AuxValAndOff() - off := sc.Off() + off := sc.Off64() val := sc.Val() if val == 1 || val == -1 { var asm obj.As @@ -821,11 +803,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } fallthrough case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify, - ssa.OpAMD64BTCQconstmodify, ssa.OpAMD64BTCLconstmodify, ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTSLconstmodify, - ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTRLconstmodify, ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify: + ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify: sc := v.AuxValAndOff() - off := sc.Off() - val := sc.Val() + off := sc.Off64() + val := sc.Val64() p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = val @@ -837,16 +818,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST sc := v.AuxValAndOff() - p.From.Offset = sc.Val() + p.From.Offset = sc.Val64() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - ssagen.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off64()) case ssa.OpAMD64MOVOstorezero: - if s.ABI != obj.ABIInternal { - v.Fatalf("MOVOstorezero can be only used in ABIInternal functions") - } - if !base.Flag.ABIWrap { - // zeroing X15 manually if wrappers are not used + if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal { + // zero X15 manually opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) } p := s.Prog(v.Op.Asm()) @@ -863,7 +841,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST sc := v.AuxValAndOff() - p.From.Offset = sc.Val() + p.From.Offset = sc.Val64() switch { case p.As == x86.AADDQ && p.From.Offset == 1: p.As = x86.AINCQ @@ -879,7 +857,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_NONE } memIdx(&p.To, v) - ssagen.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off64()) case 
ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX, ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ, ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS: @@ -912,9 +890,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } case ssa.OpAMD64ADDLloadidx1, ssa.OpAMD64ADDLloadidx4, ssa.OpAMD64ADDLloadidx8, ssa.OpAMD64ADDQloadidx1, ssa.OpAMD64ADDQloadidx8, ssa.OpAMD64SUBLloadidx1, ssa.OpAMD64SUBLloadidx4, ssa.OpAMD64SUBLloadidx8, ssa.OpAMD64SUBQloadidx1, ssa.OpAMD64SUBQloadidx8, ssa.OpAMD64ANDLloadidx1, ssa.OpAMD64ANDLloadidx4, ssa.OpAMD64ANDLloadidx8, ssa.OpAMD64ANDQloadidx1, ssa.OpAMD64ANDQloadidx8, @@ -938,15 +913,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } case ssa.OpAMD64DUFFZERO: - if s.ABI != obj.ABIInternal { - v.Fatalf("MOVOconst can be only used in ABIInternal functions") - } - if !base.Flag.ABIWrap { - // zeroing X15 manually if wrappers are not used + if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal { + // zero X15 manually opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) } off := duffStart(v.AuxInt) @@ -1013,24 +982,35 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill + // The loop only runs once. + for _, ap := range v.Block.Func.RegArgs { + // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack. + addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize) + s.FuncInfo().AddSpill( + obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByType(ap.Type), Spill: storeByType(ap.Type)}) + } + v.Block.Func.RegArgs = nil + ssagen.CheckArgReg(v) case ssa.OpAMD64LoweredGetClosurePtr: // Closure pointer is DX. 
ssagen.CheckLoweredGetClosurePtr(v) case ssa.OpAMD64LoweredGetG: - if base.Flag.ABIWrap { - v.Fatalf("LoweredGetG should not appear in new ABI") + if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal { + v.Fatalf("LoweredGetG should not appear in ABIInternal") } r := v.Reg() getgFromTLS(s, r) case ssa.OpAMD64CALLstatic: - if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal { + if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal { // zeroing X15 when entering ABIInternal from ABI0 opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) // set G register from TLS getgFromTLS(s, x86.REG_R14) } s.Call(v) - if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 { + if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 { // zeroing X15 when entering ABIInternal from ABI0 opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) // set G register from TLS @@ -1077,24 +1057,16 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL, ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.OpAMD64NEGLflags: - r := v.Reg0() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg0() - case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD: + case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() @@ -1102,7 +1074,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { switch v.Op { case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ: p.To.Reg = v.Reg0() - case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD: + case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS: p.To.Reg = v.Reg() } case ssa.OpAMD64ROUNDSD: @@ -1114,7 +1086,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } p.From.Offset = val p.From.Type = obj.TYPE_CONST - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()}) + p.SetFrom3Reg(v.Args[0].Reg()) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL: @@ -1213,25 +1185,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg0() case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ: - r := v.Reg0() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output[0] not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG - p.From.Reg = r + p.From.Reg = v.Reg0() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[1].Reg() ssagen.AddAux(&p.To, v) case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock: - r := v.Reg0() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output[0] not in same register %s", v.LongString()) - } s.Prog(x86.ALOCK) p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG - p.From.Reg = r + p.From.Reg = v.Reg0() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[1].Reg() ssagen.AddAux(&p.To, v) @@ -1271,6 +1235,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Reg = x86.REG_SP ssagen.AddAux(&p.To, v) p.To.Offset += 4 + case ssa.OpClobberReg: + x := 
uint64(0xdeaddeaddeaddead) + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(x) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() default: v.Fatalf("genValue not implemented: %s", v.LongString()) } @@ -1333,7 +1304,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { case ssa.BlockRet: s.Prog(obj.ARET) case ssa.BlockRetJmp: - if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal { + if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal { // zeroing X15 when entering ABIInternal from ABI0 opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) // set G register from TLS @@ -1376,3 +1347,27 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { b.Fatalf("branch not implemented: %s", b.LongString()) } } + +func loadRegResults(s *ssagen.State, f *ssa.Func) { + for _, o := range f.OwnAux.ABIInfo().OutParams() { + n := o.Name.(*ir.Name) + rts, offs := o.RegisterTypesAndOffsets() + for i := range o.Registers { + p := s.Prog(loadByType(rts[i])) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_AUTO + p.From.Sym = n.Linksym() + p.From.Offset = n.FrameOffset() + offs[i] + p.To.Type = obj.TYPE_REG + p.To.Reg = ssa.ObjRegForAbiReg(o.Registers[i], f.Config) + } + } +} + +func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p.To.Name = obj.NAME_PARAM + p.To.Sym = n.Linksym() + p.Pos = p.Pos.WithNotStmt() + return p +} diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go index 81959ae0abc..d68500280d0 100644 --- a/src/cmd/compile/internal/arm/galign.go +++ b/src/cmd/compile/internal/arm/galign.go @@ -8,14 +8,14 @@ import ( "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" "cmd/internal/obj/arm" - "cmd/internal/objabi" + "internal/buildcfg" ) func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &arm.Linkarm arch.REGSP = arm.REGSP arch.MAXWIDTH = (1 << 32) - 1 - arch.SoftFloat = objabi.GOARM == 5 + arch.SoftFloat = buildcfg.GOARM == 5 arch.ZeroRange = zerorange arch.Ginsnop = ginsnop arch.Ginsnopdefer = ginsnop diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 729d2dab2d8..4b083cec46b 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -6,6 +6,7 @@ package arm import ( "fmt" + "internal/buildcfg" "math" "math/bits" @@ -17,7 +18,6 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm" - "cmd/internal/objabi" ) // loadByType returns the load instruction of the given type. 
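Helpers like loadByType pick a move instruction from a type's size, signedness, and register class. A schematic version of that dispatch, with mnemonics as plain strings (a hypothetical table for illustration, not the arm backend's actual one):

package main

import "fmt"

// loadMnemonic sketches loadByType's dispatch: the size picks the
// width, and sub-word integer loads also need signedness to choose
// sign- vs zero-extension.
func loadMnemonic(size int64, signed, isFloat bool) string {
	if isFloat {
		if size == 4 {
			return "MOVF"
		}
		return "MOVD"
	}
	switch size {
	case 1:
		if signed {
			return "MOVB"
		}
		return "MOVBU"
	case 2:
		if signed {
			return "MOVH"
		}
		return "MOVHU"
	default:
		return "MOVW"
	}
}

func main() {
	fmt.Println(loadMnemonic(1, false, false)) // MOVBU
	fmt.Println(loadMnemonic(8, false, true))  // MOVD
}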
@@ -173,9 +173,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = y case ssa.OpARMMOVWnop: - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -282,14 +279,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt >> 8 - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff}) + p.SetFrom3Const(v.AuxInt & 0xff) p.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARMANDconst, ssa.OpARMBICconst: // try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks // BFC is only available on ARMv7, and its result and source are in the same register - if objabi.GOARM == 7 && v.Reg() == v.Args[0].Reg() { + if buildcfg.GOARM == 7 && v.Reg() == v.Args[0].Reg() { var val uint32 if v.Op == ssa.OpARMANDconst { val = ^uint32(v.AuxInt) @@ -302,7 +299,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(arm.ABFC) p.From.Type = obj.TYPE_CONST p.From.Offset = int64(width) - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(lsb)}) + p.SetFrom3Const(int64(lsb)) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() break @@ -646,7 +643,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { default: } } - if objabi.GOARM >= 6 { + if buildcfg.GOARM >= 6 { // generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7 genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0) return @@ -657,6 +654,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpARMREV, ssa.OpARMREV16, ssa.OpARMRBIT, + ssa.OpARMSQRTF, ssa.OpARMSQRTD, ssa.OpARMNEGF, ssa.OpARMNEGD, @@ -863,7 +861,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString()) case ssa.OpARMInvertFlags: v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) - case ssa.OpClobber: + case ssa.OpClobber, ssa.OpClobberReg: // TODO: implement for clobberdead experiment. Nop is ok for now. 
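OpClobberReg, handled as a nop here and as a MOVQ of 0xdeaddeaddeaddead on amd64 above, poisons registers that are dead at a point so that any stale use fails loudly instead of silently reading garbage. The same debugging idea applied to an ordinary Go value (a toy illustration of the clobberdead technique, not compiler code):

package main

import "fmt"

// poison is the clobber pattern written on amd64.
const poison uint64 = 0xdeaddeaddeaddead

// release marks a slot dead by poisoning it.
func release(slot *uint64) { *slot = poison }

// use checks for the pattern so a use-after-death fails loudly.
func use(slot *uint64) {
	if *slot == poison {
		panic(fmt.Sprintf("read of dead slot: %#x", *slot))
	}
	fmt.Println("live value:", *slot)
}

func main() {
	v := uint64(42)
	use(&v)     // live value: 42
	release(&v) // v is dead from here on
	use(&v)     // panics: read of dead slot
}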
default: v.Fatalf("genValue not implemented: %s", v.LongString()) diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go index 8364535f63b..89be4964619 100644 --- a/src/cmd/compile/internal/arm64/ggen.go +++ b/src/cmd/compile/internal/arm64/ggen.go @@ -10,10 +10,10 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm64" - "cmd/internal/objabi" + "internal/buildcfg" ) -var darwin = objabi.GOOS == "darwin" || objabi.GOOS == "ios" +var darwin = buildcfg.GOOS == "darwin" || buildcfg.GOOS == "ios" func padframe(frame int64) int64 { // arm64 requires that the frame size (not counting saved FP&LR) diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 73e74e12199..0c997bc4b3e 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -100,9 +100,11 @@ func genIndexedOperand(v *ssa.Value) obj.Addr { // Reg: base register, Index: (shifted) index register mop := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()} switch v.Op { - case ssa.OpARM64MOVDloadidx8, ssa.OpARM64MOVDstoreidx8, ssa.OpARM64MOVDstorezeroidx8: + case ssa.OpARM64MOVDloadidx8, ssa.OpARM64MOVDstoreidx8, ssa.OpARM64MOVDstorezeroidx8, + ssa.OpARM64FMOVDloadidx8, ssa.OpARM64FMOVDstoreidx8: mop.Index = arm64.REG_LSL | 3<<5 | v.Args[1].Reg()&31 - case ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, ssa.OpARM64MOVWstoreidx4, ssa.OpARM64MOVWstorezeroidx4: + case ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, ssa.OpARM64MOVWstoreidx4, ssa.OpARM64MOVWstorezeroidx4, + ssa.OpARM64FMOVSloadidx4, ssa.OpARM64FMOVSstoreidx4: mop.Index = arm64.REG_LSL | 2<<5 | v.Args[1].Reg()&31 case ssa.OpARM64MOVHloadidx2, ssa.OpARM64MOVHUloadidx2, ssa.OpARM64MOVHstoreidx2, ssa.OpARM64MOVHstorezeroidx2: mop.Index = arm64.REG_LSL | 1<<5 | v.Args[1].Reg()&31 @@ -140,9 +142,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = y case ssa.OpARM64MOVDnop: - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -230,7 +229,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.Reg = ra p.From.Type = obj.TYPE_REG p.From.Reg = rm - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: rn}) + p.SetFrom3Reg(rn) p.To.Type = obj.TYPE_REG p.To.Reg = rt case ssa.OpARM64ADDconst, @@ -293,7 +292,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()}) + p.SetFrom3Reg(v.Args[0].Reg()) p.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -435,7 +434,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpARM64MOVHUloadidx2, ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, - ssa.OpARM64MOVDloadidx8: + ssa.OpARM64MOVDloadidx8, + ssa.OpARM64FMOVDloadidx8, + ssa.OpARM64FMOVSloadidx4: p := s.Prog(v.Op.Asm()) p.From = genIndexedOperand(v) p.To.Type = obj.TYPE_REG @@ -472,7 +473,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpARM64FMOVDstoreidx, ssa.OpARM64MOVHstoreidx2, ssa.OpARM64MOVWstoreidx4, - ssa.OpARM64MOVDstoreidx8: + ssa.OpARM64FMOVSstoreidx4, + ssa.OpARM64MOVDstoreidx8, + ssa.OpARM64FMOVDstoreidx8: p := s.Prog(v.Op.Asm()) p.To = genIndexedOperand(v) p.From.Type = obj.TYPE_REG @@ -516,17 +519,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.To, v) case ssa.OpARM64BFI, 
ssa.OpARM64BFXIL: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt >> 8 - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff}) + p.SetFrom3Const(v.AuxInt & 0xff) p.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.OpARM64SBFIZ, ssa.OpARM64SBFX, ssa.OpARM64UBFIZ, @@ -534,7 +533,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt >> 8 - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff}) + p.SetFrom3Const(v.AuxInt & 0xff) p.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -894,6 +893,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpARM64FMOVSgpfp, ssa.OpARM64FNEGS, ssa.OpARM64FNEGD, + ssa.OpARM64FSQRTS, ssa.OpARM64FSQRTD, ssa.OpARM64FCVTZSSW, ssa.OpARM64FCVTZSDW, @@ -915,6 +915,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpARM64FCVTDS, ssa.OpARM64REV, ssa.OpARM64REVW, + ssa.OpARM64REV16, ssa.OpARM64REV16W, ssa.OpARM64RBIT, ssa.OpARM64RBITW, @@ -953,7 +954,21 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg p.From.Reg = condBits[ssa.Op(v.AuxInt)] p.Reg = v.Args[0].Reg() - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1}) + p.SetFrom3Reg(r1) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64CSINC, ssa.OpARM64CSINV, ssa.OpARM64CSNEG: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg + p.From.Reg = condBits[ssa.Op(v.AuxInt)] + p.Reg = v.Args[0].Reg() + p.SetFrom3Reg(v.Args[1].Reg()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64CSETM: + p := s.Prog(arm64.ACSETM) + p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg + p.From.Reg = condBits[ssa.Op(v.AuxInt)] p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARM64DUFFZERO: @@ -1086,7 +1101,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString()) case ssa.OpARM64InvertFlags: v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) - case ssa.OpClobber: + case ssa.OpClobber, ssa.OpClobberReg: // TODO: implement for clobberdead experiment. Nop is ok for now. default: v.Fatalf("genValue not implemented: %s", v.LongString()) diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go index 3b9bc3a8af2..4c2516f60e3 100644 --- a/src/cmd/compile/internal/base/base.go +++ b/src/cmd/compile/internal/base/base.go @@ -70,6 +70,6 @@ var NoInstrumentPkgs = []string{ "internal/cpu", } -// Don't insert racefuncenterfp/racefuncexit into the following packages. +// Don't insert racefuncenter/racefuncexit into the following packages. // Memory accesses in the packages are either uninteresting or will cause false positives. var NoRacePkgs = []string{"sync", "sync/atomic"} diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index 164941bb26c..71712ab1a56 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -13,14 +13,10 @@ import ( "reflect" "strconv" "strings" - - "cmd/internal/objabi" ) // Debug holds the parsed debugging configuration values. 
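Each field of DebugFlags becomes a -d setting named after the lower-cased field name, with help text taken from the struct tag. A reduced sketch of how such a comma-separated -d string can be split into name=value settings and applied to int fields via reflection (a hypothetical parser covering only int fields; a bare name is short for name=1, matching the convention documented above):

package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

type debugFlags struct {
	Checkptr int `help:"instrument unsafe pointer conversions"`
	Nil      int `help:"print information about nil checks"`
}

// parseDebug applies "name" or "name=N" settings to matching fields.
func parseDebug(spec string, into *debugFlags) error {
	v := reflect.ValueOf(into).Elem()
	for _, setting := range strings.Split(spec, ",") {
		name, val := setting, "1"
		if i := strings.Index(setting, "="); i >= 0 {
			name, val = setting[:i], setting[i+1:]
		}
		n, err := strconv.Atoi(val)
		if err != nil {
			return fmt.Errorf("-d=%s: %v", setting, err)
		}
		field := v.FieldByNameFunc(func(fn string) bool {
			return strings.EqualFold(fn, name)
		})
		if !field.IsValid() {
			return fmt.Errorf("unknown debug setting %q", name)
		}
		field.SetInt(int64(n))
	}
	return nil
}

func main() {
	var d debugFlags
	if err := parseDebug("checkptr,nil=2", &d); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", d) // {Checkptr:1 Nil:2}
}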
-var Debug = DebugFlags{ - Fieldtrack: &objabi.Fieldtrack_enabled, -} +var Debug DebugFlags // DebugFlags defines the debugging configuration values (see var Debug). // Each struct field is a different value, named for the lower-case of the field name. @@ -29,28 +25,29 @@ var Debug = DebugFlags{ // The -d option takes a comma-separated list of settings. // Each setting is name=value; for ints, name is short for name=1. type DebugFlags struct { - Append int `help:"print information about append compilation"` - Checkptr int `help:"instrument unsafe pointer conversions"` - Closure int `help:"print information about closure compilation"` - DclStack int `help:"run internal dclstack check"` - Defer int `help:"print information about defer compilation"` - DisableNil int `help:"disable nil checks"` - DumpPtrs int `help:"show Node pointers values in dump output"` - DwarfInl int `help:"print information about DWARF inlined function creation"` - Export int `help:"print export data"` - Fieldtrack *int `help:"enable field tracking"` - GCProg int `help:"print dump of GC programs"` - Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"` - LocationLists int `help:"print information about DWARF location list creation"` - Nil int `help:"print information about nil checks"` - PCTab string `help:"print named pc-value table"` - Panic int `help:"show all compiler panics"` - Slice int `help:"print information about slice compilation"` - SoftFloat int `help:"force compiler to emit soft-float code"` - TypeAssert int `help:"print information about type assertion inlining"` - TypecheckInl int `help:"eager typechecking of inline function bodies"` - WB int `help:"print information about write barriers"` - ABIWrap int `help:"print information about ABI wrapper generation"` + Append int `help:"print information about append compilation"` + Checkptr int `help:"instrument unsafe pointer conversions"` + Closure int `help:"print information about closure compilation"` + DclStack int `help:"run internal dclstack check"` + Defer int `help:"print information about defer compilation"` + DisableNil int `help:"disable nil checks"` + DumpPtrs int `help:"show Node pointers values in dump output"` + DwarfInl int `help:"print information about DWARF inlined function creation"` + Export int `help:"print export data"` + GCProg int `help:"print dump of GC programs"` + InlFuncsWithClosures int `help:"allow functions with closures to be inlined"` + Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"` + LocationLists int `help:"print information about DWARF location list creation"` + Nil int `help:"print information about nil checks"` + NoOpenDefer int `help:"disable open-coded defers"` + PCTab string `help:"print named pc-value table"` + Panic int `help:"show all compiler panics"` + Slice int `help:"print information about slice compilation"` + SoftFloat int `help:"force compiler to emit soft-float code"` + TypeAssert int `help:"print information about type assertion inlining"` + TypecheckInl int `help:"eager typechecking of inline function bodies"` + WB int `help:"print information about write barriers"` + ABIWrap int `help:"print information about ABI wrapper generation"` any bool // set when any of the values have been set } @@ -85,8 +82,6 @@ func init() { panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type)) case *int, *string: // ok - case **int: - ptr = *ptr.(**int) // record the *int itself } debugTab = append(debugTab, debugField{name, help, ptr}) } diff 
--git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index d8ca9885cb4..42c0c1b94b5 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -8,6 +8,7 @@ import ( "encoding/json" "flag" "fmt" + "internal/buildcfg" "io/ioutil" "log" "os" @@ -82,14 +83,14 @@ type CmdFlags struct { CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\"" // Longer names - ABIWrap bool "help:\"enable generation of ABI wrappers\"" - ABIWrapLimit int "help:\"emit at most N ABI wrappers (for debugging)\"" AsmHdr string "help:\"write assembly header to `file`\"" Bench string "help:\"append benchmark times to `file`\"" BlockProfile string "help:\"write block profile to `file`\"" BuildID string "help:\"record `id` as the build id in the export metadata\"" CPUProfile string "help:\"write cpu profile to `file`\"" Complete bool "help:\"compiling complete package (no C or assembly)\"" + ClobberDead bool "help:\"clobber dead stack slots (for debugging)\"" + ClobberDeadReg bool "help:\"clobber dead registers (for debugging)\"" Dwarf bool "help:\"generate DWARF symbols\"" DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below @@ -146,8 +147,7 @@ func ParseFlags() { Flag.LowerP = &Ctxt.Pkgpath Flag.LowerV = &Ctxt.Debugvlog - Flag.ABIWrap = objabi.Regabi_enabled != 0 - Flag.Dwarf = objabi.GOARCH != "wasm" + Flag.Dwarf = buildcfg.GOARCH != "wasm" Flag.DwarfBASEntries = &Ctxt.UseBASEntries Flag.DwarfLocationLists = &Ctxt.Flag_locationlists *Flag.DwarfLocationLists = true @@ -159,6 +159,9 @@ func ParseFlags() { Flag.LinkShared = &Ctxt.Flag_linkshared Flag.Shared = &Ctxt.Flag_shared Flag.WB = true + Debug.InlFuncsWithClosures = 1 + + Debug.Checkptr = -1 // so we can tell whether it is set explicitly Flag.Cfg.ImportMap = make(map[string]string) @@ -166,14 +169,14 @@ func ParseFlags() { registerFlags() objabi.Flagparse(usage) - if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) { - log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH) + if Flag.MSan && !sys.MSanSupported(buildcfg.GOOS, buildcfg.GOARCH) { + log.Fatalf("%s/%s does not support -msan", buildcfg.GOOS, buildcfg.GOARCH) } - if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) { - log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH) + if Flag.Race && !sys.RaceDetectorSupported(buildcfg.GOOS, buildcfg.GOARCH) { + log.Fatalf("%s/%s does not support -race", buildcfg.GOOS, buildcfg.GOARCH) } if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) { - log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH) + log.Fatalf("%s/%s does not support -shared", buildcfg.GOOS, buildcfg.GOARCH) } parseSpectre(Flag.Spectre) // left as string for RecordFlags @@ -215,7 +218,9 @@ func ParseFlags() { } if Flag.Race || Flag.MSan { // -race and -msan imply -d=checkptr for now. 
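Seeding Debug.Checkptr with -1 before flag parsing, as the hunk below does, lets the compiler distinguish "never set" from an explicit -d=checkptr=0, so -race can imply checkptr without overriding a user's explicit choice. The same tri-state idiom with the standard flag package (a standalone sketch; the flag names are illustrative):

package main

import (
	"flag"
	"fmt"
)

func main() {
	// -1 is a sentinel meaning "not set explicitly", mirroring
	// Debug.Checkptr = -1 before flag parsing.
	checkptr := flag.Int("checkptr", -1, "instrument unsafe pointer conversions")
	race := flag.Bool("race", false, "enable race detector")
	flag.Parse()

	if *race && *checkptr == -1 { // -race implies checkptr unless overridden
		*checkptr = 1
	}
	if *checkptr == -1 { // still unset: apply the real default
		*checkptr = 0
	}
	fmt.Println("checkptr =", *checkptr)
}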
- Debug.Checkptr = 1 + if Debug.Checkptr == -1 { // if not set explicitly + Debug.Checkptr = 1 + } } if Flag.CompilingRuntime && Flag.N != 0 { @@ -236,6 +241,10 @@ func ParseFlags() { Debug.Libfuzzer = 0 } + if Debug.Checkptr == -1 { // if not set explicitly + Debug.Checkptr = 0 + } + // set via a -d flag Ctxt.Debugpcln = Debug.PCTab } @@ -339,7 +348,7 @@ func concurrentBackendAllowed() bool { return false } // TODO: Test and delete this condition. - if objabi.Fieldtrack_enabled != 0 { + if buildcfg.Experiment.FieldTrack { return false } // TODO: fix races and enable the following flags @@ -450,11 +459,11 @@ func parseSpectre(s string) { } if Flag.Cfg.SpectreIndex { - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "amd64": // ok default: - log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH) + log.Fatalf("GOARCH=%s does not support -spectre=index", buildcfg.GOARCH) } } } diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go index 668c600d317..b095fd704da 100644 --- a/src/cmd/compile/internal/base/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -6,12 +6,12 @@ package base import ( "fmt" + "internal/buildcfg" "os" "runtime/debug" "sort" "strings" - "cmd/internal/objabi" "cmd/internal/src" ) @@ -217,7 +217,7 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) { fmt.Printf("\n") // If this is a released compiler version, ask for a bug report. - if strings.HasPrefix(objabi.Version, "go") { + if strings.HasPrefix(buildcfg.Version, "go") { fmt.Printf("\n") fmt.Printf("Please file a bug report including a short program that triggers the error.\n") fmt.Printf("https://golang.org/issue/new\n") diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index 70168cffebf..422c7e66c47 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -8,6 +8,7 @@ import ( "bytes" "flag" "fmt" + "internal/buildcfg" "sort" "cmd/compile/internal/base" @@ -137,8 +138,11 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir var vars []*dwarf.Var var decls []*ir.Name var selected ir.NameSet + if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { decls, vars, selected = createComplexVars(fnsym, fn) + } else if fn.ABI == obj.ABIInternal && base.Flag.N != 0 && complexOK { + decls, vars, selected = createABIVars(fnsym, fn, apDecls) } else { decls, vars, selected = createSimpleVars(fnsym, apDecls) } @@ -264,20 +268,29 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { var abbrev int var offs int64 - switch n.Class { - case ir.PAUTO: + localAutoOffset := func() int64 { offs = n.FrameOffset() - abbrev = dwarf.DW_ABRV_AUTO if base.Ctxt.FixedFrameSize() == 0 { offs -= int64(types.PtrSize) } - if objabi.Framepointer_enabled { + if buildcfg.FramePointerEnabled { offs -= int64(types.PtrSize) } + return offs + } + switch n.Class { + case ir.PAUTO: + offs = localAutoOffset() + abbrev = dwarf.DW_ABRV_AUTO case ir.PPARAM, ir.PPARAMOUT: abbrev = dwarf.DW_ABRV_PARAM - offs = n.FrameOffset() + base.Ctxt.FixedFrameSize() + if n.IsOutputParamInRegisters() { + offs = localAutoOffset() + } else { + offs = n.FrameOffset() + base.Ctxt.FixedFrameSize() + } + default: base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n) } @@ -309,6 +322,37 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { } } +// createABIVars creates DWARF variables for 
functions in which the +// register ABI is enabled but optimization is turned off. It uses a +// hybrid approach in which register-resident input params are +// captured with location lists, and all other vars use the "simple" +// strategy. +func createABIVars(fnsym *obj.LSym, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { + + // Invoke createComplexVars to generate dwarf vars for input parameters + // that are register-allocated according to the ABI rules. + decls, vars, selected := createComplexVars(fnsym, fn) + + // Now fill in the remainder of the variables: input parameters + // that are not register-resident, output parameters, and local + // variables. + for _, n := range apDecls { + if ir.IsAutoTmp(n) { + continue + } + if _, ok := selected[n]; ok { + // already handled + continue + } + + decls = append(decls, n) + vars = append(vars, createSimpleVar(fnsym, n)) + selected.Add(n) + } + + return decls, vars, selected +} + // createComplexVars creates recomposed DWARF vars with location lists, // suitable for describing optimized code. func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { diff --git a/src/cmd/compile/internal/dwarfgen/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go index d5687cb1d72..8adb36fc883 100644 --- a/src/cmd/compile/internal/dwarfgen/dwinl.go +++ b/src/cmd/compile/internal/dwarfgen/dwinl.go @@ -247,7 +247,8 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int { DeclCol: pos.Col(), } if _, found := m[vp]; found { - base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name) + // We can see collisions (variables with the same name/file/line/col) in obfuscated or machine-generated code -- see issue 44378 for an example. Skip duplicates in such cases, since it is unlikely that a human will be debugging such code. + continue } m[vp] = i } diff --git a/src/cmd/compile/internal/dwarfgen/scope.go b/src/cmd/compile/internal/dwarfgen/scope.go index 4957e24e447..b4ae69e96fa 100644 --- a/src/cmd/compile/internal/dwarfgen/scope.go +++ b/src/cmd/compile/internal/dwarfgen/scope.go @@ -36,7 +36,7 @@ func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varSco dwarfScopes[i+1].Parent = int32(parent) } - scopeVariables(dwarfVars, varScopes, dwarfScopes) + scopeVariables(dwarfVars, varScopes, dwarfScopes, fnsym.ABI() != obj.ABI0) if fnsym.Func().Text != nil { scopePCs(fnsym, fn.Marks, dwarfScopes) } @@ -44,8 +44,12 @@ func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varSco } // scopeVariables assigns DWARF variable records to their scopes. 
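The varsByScope helper added below keeps two parallel slices aligned while sorting by scope alone, relying on sort.Stable to preserve the existing order within each scope. A cut-down, runnable version of that parallel-slice pattern:

package main

import (
	"fmt"
	"sort"
)

// byScope sorts vars and scopes in lock-step by scope only, as
// varsByScope does; sort.Stable keeps declaration order within a
// scope, which matters when offsets no longer determine order.
type byScope struct {
	vars   []string
	scopes []int
}

func (v byScope) Len() int           { return len(v.vars) }
func (v byScope) Less(i, j int) bool { return v.scopes[i] < v.scopes[j] }
func (v byScope) Swap(i, j int) {
	v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
	v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i]
}

func main() {
	s := byScope{
		vars:   []string{"b", "f", "a", "flag"},
		scopes: []int{1, 1, 0, 0},
	}
	sort.Stable(s)
	fmt.Println(s.vars, s.scopes) // [a flag b f] [0 0 1 1]
}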
-func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope) { - sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes}) +func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope, regabi bool) { + if regabi { + sort.Stable(varsByScope{dwarfVars, varScopes}) + } else { + sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes}) + } i0 := 0 for i := range dwarfVars { @@ -112,3 +116,21 @@ func (v varsByScopeAndOffset) Swap(i, j int) { v.vars[i], v.vars[j] = v.vars[j], v.vars[i] v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i] } + +type varsByScope struct { + vars []*dwarf.Var + scopes []ir.ScopeID +} + +func (v varsByScope) Len() int { + return len(v.vars) +} + +func (v varsByScope) Less(i, j int) bool { + return v.scopes[i] < v.scopes[j] +} + +func (v varsByScope) Swap(i, j int) { + v.vars[i], v.vars[j] = v.vars[j], v.vars[i] + v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i] +} diff --git a/src/cmd/compile/internal/dwarfgen/scope_test.go b/src/cmd/compile/internal/dwarfgen/scope_test.go index fcfcf85f84c..3df4c345c3c 100644 --- a/src/cmd/compile/internal/dwarfgen/scope_test.go +++ b/src/cmd/compile/internal/dwarfgen/scope_test.go @@ -181,17 +181,17 @@ var testfile = []testline{ {line: " fi(p)", scopes: []int{1}}, {line: " }"}, {line: "}"}, - {line: "func TestCaptureVar(flag bool) func() int {"}, - {line: " a := 1", vars: []string{"arg flag bool", "arg ~r1 func() int", "var a int"}}, + {line: "var fglob func() int"}, + {line: "func TestCaptureVar(flag bool) {"}, + {line: " a := 1", vars: []string{"arg flag bool", "var a int"}}, // TODO(register args) restore "arg ~r1 func() int", {line: " if flag {"}, {line: " b := 2", scopes: []int{1}, vars: []string{"var b int", "var f func() int"}}, {line: " f := func() int {", scopes: []int{1, 0}}, {line: " return b + 1"}, {line: " }"}, - {line: " return f", scopes: []int{1}}, + {line: " fglob = f", scopes: []int{1}}, {line: " }"}, {line: " f1(a)"}, - {line: " return nil"}, {line: "}"}, {line: "func main() {"}, {line: " TestNestedFor()"}, diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 58cad73c762..3ac7ff1ebe1 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -587,7 +587,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { switch n.Op() { default: - base.Fatalf("unexpected expr: %v", n) + base.Fatalf("unexpected expr: %s %v", n.Op().String(), n) case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET: // nop @@ -669,12 +669,15 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) { k = e.spill(k, n) } e.expr(k.note(n, "interface-converted"), n.X) - + case ir.OSLICE2ARRPTR: + // the slice pointer flows directly to the result + n := n.(*ir.ConvExpr) + e.expr(k, n.X) case ir.ORECV: n := n.(*ir.UnaryExpr) e.discard(n.X) - case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY: + case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE: e.call([]hole{k}, n, nil) case ir.ONEW: @@ -1098,6 +1101,11 @@ func (e *escape) call(ks []hole, call, where ir.Node) { case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE: call := call.(*ir.UnaryExpr) argument(e.discardHole(), call.X) + + case ir.OUNSAFEADD, ir.OUNSAFESLICE: + call := call.(*ir.BinaryExpr) + argument(ks[0], call.X) + argument(e.discardHole(), 
call.Y) } } @@ -1292,7 +1300,7 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { if n.Op() == ir.ONAME { n := n.(*ir.Name) if n.Curfn != e.curfn { - base.Fatalf("curfn mismatch: %v != %v", n.Curfn, e.curfn) + base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n) } if n.Opt != nil { @@ -1305,6 +1313,9 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location { } func (b *batch) oldLoc(n *ir.Name) *location { + if n.Canonical().Opt == nil { + base.Fatalf("%v has no location", n) + } return n.Canonical().Opt.(*location) } @@ -1625,9 +1636,10 @@ func containsClosure(f, c *ir.Func) bool { // leak records that parameter l leaks to sink. func (l *location) leakTo(sink *location, derefs int) { - // If sink is a result parameter and we can fit return bits - // into the escape analysis tag, then record a return leak. - if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn { + // If sink is a result parameter that doesn't escape (#44614) + // and we can fit return bits into the escape analysis tag, + // then record as a result leak. + if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn { ri := sink.resultIndex - 1 if ri < numEscResults { // Leak to result parameter. diff --git a/src/cmd/compile/internal/gc/bootstrap.go b/src/cmd/compile/internal/gc/bootstrap.go index 2e13d6b57ac..37b0d59ede7 100644 --- a/src/cmd/compile/internal/gc/bootstrap.go +++ b/src/cmd/compile/internal/gc/bootstrap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.8 // +build !go1.8 package gc diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index ba67c58c455..00504451a88 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -13,10 +13,12 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/liveness" + "cmd/compile/internal/objw" "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/compile/internal/walk" + "cmd/internal/obj" ) // "Portable" code generation. @@ -43,7 +45,14 @@ func enqueueFunc(fn *ir.Func) { if len(fn.Body) == 0 { // Initialize ABI wrappers if necessary. ssagen.InitLSym(fn, false) - liveness.WriteFuncMap(fn) + types.CalcSize(fn.Type()) + a := ssagen.AbiForBodylessFuncStackMap(fn) + abiInfo := a.ABIAnalyzeFuncType(fn.Type().FuncType()) // abiInfo has spill/home locations for wrapper + liveness.WriteFuncMap(fn, abiInfo) + if fn.ABI == obj.ABI0 { + x := ssagen.EmitArgInfo(fn, abiInfo) + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL) + } return } @@ -110,38 +119,51 @@ func compileFunctions() { }) } - // We queue up a goroutine per function that needs to be - // compiled, but require them to grab an available worker ID - // before doing any substantial work to limit parallelism. - workerIDs := make(chan int, base.Flag.LowerC) - for i := 0; i < base.Flag.LowerC; i++ { - workerIDs <- i + // By default, we perform work right away on the current goroutine + // as the solo worker. + queue := func(work func(int)) { + work(0) + } + + if nWorkers := base.Flag.LowerC; nWorkers > 1 { + // For concurrent builds, we create a goroutine per task, but + // require them to hold a unique worker ID while performing work + // to limit parallelism. 
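The worker-ID channel built in the lines that follow is a counting semaphore that also hands each task a stable ID (useful for per-worker scratch state). Extracted into a runnable form (task names are placeholders):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const nWorkers = 3
	tasks := []string{"f1", "f2", "f3", "f4", "f5", "f6"}

	// A buffered channel of IDs doubles as a counting semaphore:
	// receiving acquires a worker slot, sending it back releases it.
	workerIDs := make(chan int, nWorkers)
	for i := 0; i < nWorkers; i++ {
		workerIDs <- i
	}

	var wg sync.WaitGroup
	queue := func(work func(int)) {
		go func() {
			worker := <-workerIDs
			work(worker)
			workerIDs <- worker
		}()
	}

	wg.Add(len(tasks))
	for _, t := range tasks {
		t := t
		queue(func(worker int) {
			fmt.Printf("worker %d compiled %s\n", worker, t)
			wg.Done()
		})
	}
	wg.Wait()
}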
+ workerIDs := make(chan int, nWorkers) + for i := 0; i < nWorkers; i++ { + workerIDs <- i + } + + queue = func(work func(int)) { + go func() { + worker := <-workerIDs + work(worker) + workerIDs <- worker + }() + } } var wg sync.WaitGroup - var asyncCompile func(*ir.Func) - asyncCompile = func(fn *ir.Func) { - wg.Add(1) - go func() { - worker := <-workerIDs - ssagen.Compile(fn, worker) - workerIDs <- worker - - // Done compiling fn. Schedule it's closures for compilation. - for _, closure := range fn.Closures { - asyncCompile(closure) - } - wg.Done() - }() + var compile func([]*ir.Func) + compile = func(fns []*ir.Func) { + wg.Add(len(fns)) + for _, fn := range fns { + fn := fn + queue(func(worker int) { + ssagen.Compile(fn, worker) + compile(fn.Closures) + wg.Done() + }) + } } types.CalcSizeDisabled = true // not safe to calculate sizes concurrently base.Ctxt.InParallel = true - for _, fn := range compilequeue { - asyncCompile(fn) - } + + compile(compilequeue) compilequeue = nil wg.Wait() + base.Ctxt.InParallel = false types.CalcSizeDisabled = false } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 4d8221f53ba..2137f1d1961 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -152,7 +152,7 @@ func (p *exporter) markType(t *types.Type) { } case types.TINTER: - for _, f := range t.FieldSlice() { + for _, f := range t.AllMethods().Slice() { if types.IsExported(f.Sym.Name) { p.markType(f.Type) } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 726a0685d57..ce50cbb4c2e 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -28,6 +28,7 @@ import ( "cmd/internal/src" "flag" "fmt" + "internal/buildcfg" "log" "os" "runtime" @@ -105,7 +106,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { // Record flags that affect the build result. (And don't // record flags that don't, since that would cause spurious // changes in the binary.) - dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") + dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarf", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") if !base.EnableTrace && base.Flag.LowerT { log.Fatalf("compiler not built with support for -t") @@ -139,8 +140,9 @@ func Main(archInit func(*ssagen.ArchInfo)) { types.ParseLangFlag() + symABIs := ssagen.NewSymABIs(base.Ctxt.Pkgpath) if base.Flag.SymABIs != "" { - ssagen.ReadSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath) + symABIs.ReadSymABIs(base.Flag.SymABIs) } if base.Compiling(base.NoInstrumentPkgs) { @@ -157,6 +159,9 @@ func Main(archInit func(*ssagen.ArchInfo)) { dwarf.EnableLogging(base.Debug.DwarfInl != 0) } if base.Debug.SoftFloat != 0 { + if buildcfg.Experiment.RegabiArgs { + log.Fatalf("softfloat mode with GOEXPERIMENT=regabiargs not implemented ") + } ssagen.Arch.SoftFloat = true } @@ -187,7 +192,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { noder.LoadPackage(flag.Args()) dwarfgen.RecordPackageName() - ssagen.CgoSymABIs() // Build init task. if initTask := pkginit.Task(); initTask != nil { @@ -233,6 +237,10 @@ func Main(archInit func(*ssagen.ArchInfo)) { } ir.CurFunc = nil + // Generate ABI wrappers. Must happen before escape analysis + // and doesn't benefit from dead-coding or inlining. + symABIs.GenABIWrappers() + // Escape analysis. 
// Required for moving heap allocations onto stack, // which in turn is required by the closure implementation, @@ -328,7 +336,7 @@ func writebench(filename string) error { } var buf bytes.Buffer - fmt.Fprintln(&buf, "commit:", objabi.Version) + fmt.Fprintln(&buf, "commit:", buildcfg.Version) fmt.Fprintln(&buf, "goos:", runtime.GOOS) fmt.Fprintln(&buf, "goarch:", runtime.GOARCH) base.Timer.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":") diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 0472af74419..0b10cb8a9e1 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -70,7 +70,7 @@ func dumpobj1(outfile string, mode int) { } func printObjHeader(bout *bio.Writer) { - fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring()) + bout.WriteString(objabi.HeaderString()) if base.Flag.BuildID != "" { fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID) } @@ -195,6 +195,7 @@ func dumpGlobal(n *ir.Name) { } types.CalcSize(n.Type()) ggloblnod(n) + base.Ctxt.DwarfGlobal(base.Ctxt.Pkgpath, types.TypeSymName(n.Type()), n.Linksym()) } func dumpGlobalConst(n ir.Node) { @@ -256,6 +257,11 @@ func addGCLocals() { if x := fn.OpenCodedDeferInfo; x != nil { objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) } + if x := fn.ArgInfo; x != nil { + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) + x.Set(obj.AttrStatic, true) + x.Set(obj.AttrContentAddressable, true) + } } } diff --git a/src/cmd/compile/internal/gc/pprof.go b/src/cmd/compile/internal/gc/pprof.go index 256c6592591..5f9b030621c 100644 --- a/src/cmd/compile/internal/gc/pprof.go +++ b/src/cmd/compile/internal/gc/pprof.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.8 // +build go1.8 package gc diff --git a/src/cmd/compile/internal/gc/trace.go b/src/cmd/compile/internal/gc/trace.go index c6eb23a0909..8cdbd4b0f3a 100644 --- a/src/cmd/compile/internal/gc/trace.go +++ b/src/cmd/compile/internal/gc/trace.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.7 // +build go1.7 package gc diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go index 33c46a0f906..8ab0b7b9896 100644 --- a/src/cmd/compile/internal/importer/iimport.go +++ b/src/cmd/compile/internal/importer/iimport.go @@ -4,7 +4,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. +// See cmd/compile/internal/typecheck/iexport.go for the export data format. package importer diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 7d70fca6c9f..a6829e9835f 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -53,8 +53,8 @@ const ( inlineBigFunctionMaxCost = 20 // Max cost of inlinee when inlining into a "big" function. ) +// InlinePackage finds functions that can be inlined and clones them before walk expands them. func InlinePackage() { - // Find functions that can be inlined and clone them before walk expands them. ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) { numfns := numNonClosures(list) for _, n := range list { @@ -74,8 +74,8 @@ func InlinePackage() { } // CanInline determines whether fn is inlineable. 
-// If so, CanInline saves fn->nbody in fn->inl and substitutes it with a copy. -// fn and ->nbody will already have been typechecked. +// If so, CanInline saves copies of fn.Body and fn.Dcl in fn.Inl. +// fn and fn.Body will already have been typechecked. func CanInline(fn *ir.Func) { if fn.Nname == nil { base.Fatalf("CanInline no nname %+v", fn) @@ -354,15 +354,13 @@ func (v *hairyVisitor) doNode(n ir.Node) bool { return true case ir.OCLOSURE: - // TODO(danscales,mdempsky): Get working with -G. - // Probably after #43818 is fixed. - if base.Flag.G > 0 { - v.reason = "inlining closures not yet working with -G" + if base.Debug.InlFuncsWithClosures == 0 { + v.reason = "not inlining functions with closures" return true } - // TODO(danscales) - fix some bugs when budget is lowered below 15 - // Maybe make budget proportional to number of closure variables, e.g.: + // TODO(danscales): Maybe make budget proportional to number of closure + // variables, e.g.: //v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3) v.budget -= 15 // Scan body of closure (which DoChildren doesn't automatically @@ -384,6 +382,22 @@ func (v *hairyVisitor) doNode(n ir.Node) bool { case ir.OAPPEND: v.budget -= inlineExtraAppendCost + case ir.ODEREF: + // *(*X)(unsafe.Pointer(&x)) is low-cost + n := n.(*ir.StarExpr) + + ptr := n.X + for ptr.Op() == ir.OCONVNOP { + ptr = ptr.(*ir.ConvExpr).X + } + if ptr.Op() == ir.OADDR { + v.budget += 1 // undo half of default cost of ir.ODEREF+ir.OADDR + } + + case ir.OCONVNOP: + // This doesn't produce code, but the children might. + v.budget++ // undo default cost + case ir.ODCLCONST, ir.OFALL: // These nodes don't produce code; omit from inlining budget. return false @@ -492,7 +506,10 @@ func inlcopy(n ir.Node) ir.Node { newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym()) // XXX OK to share fn.Type() ?? newfn.Nname.SetType(oldfn.Nname.Type()) - newfn.Nname.Ntype = inlcopy(oldfn.Nname.Ntype).(ir.Ntype) + // Ntype can be nil for -G=3 mode. + if oldfn.Nname.Ntype != nil { + newfn.Nname.Ntype = inlcopy(oldfn.Nname.Ntype).(ir.Ntype) + } newfn.Body = inlcopylist(oldfn.Body) // Make shallow copy of the Dcl and ClosureVar slices newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...) @@ -503,7 +520,7 @@ func inlcopy(n ir.Node) ir.Node { return edit(n) } -// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any +// InlineCalls/inlnode walks fn's statements and expressions and substitutes any // calls made to inlineable functions. This is the external entry point. func InlineCalls(fn *ir.Func) { savefn := ir.CurFunc @@ -836,17 +853,25 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b } } + // We can delay declaring+initializing result parameters if: + // (1) there's exactly one "return" statement in the inlined function; + // (2) it's not an empty return statement (#44355); and + // (3) the result parameters aren't named. + delayretvars := true + nreturns := 0 ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) { - if n != nil && n.Op() == ir.ORETURN { + if n, ok := n.(*ir.ReturnStmt); ok { nreturns++ + if len(n.Results) == 0 { + delayretvars = false // empty return statement (case 2) + } } }) - // We can delay declaring+initializing result parameters if: - // (1) there's only one "return" statement in the inlined - // function, and (2) the result parameters aren't named. 
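Before the old one-line test is replaced below, it may help to see the three delayretvars conditions as user-level code. The following is an editor-supplied illustration, not part of the CL; in particular, the blank-named-result shape for the #44355 case is my reading of that issue and should be treated as an assumption:

```go
package main

import "fmt"

// Qualifies: exactly one return statement, it is not empty, and the
// result parameter is unnamed.
func ok(x int) int { return x + 1 }

// Case 1 fails: more than one return statement.
func twoReturns(x int) int {
	if x > 0 {
		return x
	}
	return -x
}

// Case 2 fails (assumed shape of #44355): the bare "return" requires
// the result variable to already exist with its zero value, so its
// initialization cannot be delayed.
func emptyReturn() (_ int) { return }

// Case 3 fails: a named result can be read and written by name
// throughout the body, so it must be declared up front.
func named(x int) (y int) {
	y = x * 2
	return y
}

func main() {
	fmt.Println(ok(1), twoReturns(-2), emptyReturn(), named(3))
}
```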
- delayretvars := nreturns == 1
+ if nreturns != 1 {
+ delayretvars = false // not exactly one return statement (case 1)
+ }
// temporaries for return values.
var retvars []ir.Node
@@ -857,7 +882,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
m = inlvar(n)
m = typecheck.Expr(m).(*ir.Name)
inlvars[n] = m
- delayretvars = false // found a named result parameter
+ delayretvars = false // found a named result parameter (case 3)
} else {
// anonymous return values, synthesize names for use in assignment that replaces return
m = retvar(t, i)
@@ -977,6 +1002,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
retvars: retvars,
delayretvars: delayretvars,
inlvars: inlvars,
+ defnMarker: ir.NilExpr{},
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: newIndex,
fn: fn,
@@ -988,7 +1014,9 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
lab := ir.NewLabelStmt(base.Pos, retlabel)
body = append(body, lab)
- typecheck.Stmts(body)
+ if !typecheck.Go117ExportTypes {
+ typecheck.Stmts(body)
+ }
if base.Flag.GenDwarfInl > 0 {
for _, v := range inlfvars {
@@ -1076,6 +1104,10 @@ type inlsubst struct {
delayretvars bool
inlvars map[*ir.Name]*ir.Name
+ // defnMarker is used to mark a Node for reassignment.
+ // inlsubst.clovar sets this when creating a new ONAME.
+ // inlsubst.node then sets the correct Defn for the inlvar.
+ defnMarker ir.NilExpr
// bases maps from original PosBase to PosBase with an extra
// inlined call frame.
@@ -1133,7 +1165,11 @@ func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
m := &ir.Name{}
*m = *n
m.Curfn = subst.newclofn
- if n.Defn != nil && n.Defn.Op() == ir.ONAME {
+
+ switch defn := n.Defn.(type) {
+ case nil:
+ // ok
+ case *ir.Name:
if !n.IsClosureVar() {
base.FatalfAt(n.Pos(), "want closure variable, got: %+v", n)
}
@@ -1155,7 +1191,15 @@ func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
if subst.inlvars[n.Defn.(*ir.Name)] != nil {
m.Defn = subst.node(n.Defn)
}
+ case *ir.AssignStmt, *ir.AssignListStmt:
+ // Mark node for reassignment at the end of inlsubst.node.
+ m.Defn = &subst.defnMarker
+ case *ir.TypeSwitchGuard:
+ // TODO(mdempsky): Set m.Defn properly. See discussion on #45743.
+ default:
+ base.FatalfAt(n.Pos(), "unexpected Defn: %+v", defn)
}
+
if n.Outer != nil {
// Either the outer variable is defined in function being inlined,
// and we will replace it with the substituted variable, or it is
@@ -1191,7 +1235,10 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
newfn.SetIsHiddenClosure(true)
newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
newfn.Nname.Func = newfn
- newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
+ // Ntype can be nil for -G=3 mode.
+ if oldfn.Nname.Ntype != nil { + newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype) + } newfn.Nname.Defn = newfn m.(*ir.ClosureExpr).Func = newfn @@ -1347,6 +1394,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { return ir.NewBlockStmt(base.Pos, init) case ir.OGOTO: + if subst.newclofn != nil { + // Don't do special substitutions if inside a closure + break + } n := n.(*ir.BranchStmt) m := ir.Copy(n).(*ir.BranchStmt) m.SetPos(subst.updatedPos(m.Pos())) @@ -1376,6 +1427,20 @@ func (subst *inlsubst) node(n ir.Node) ir.Node { m := ir.Copy(n) m.SetPos(subst.updatedPos(m.Pos())) ir.EditChildren(m, subst.edit) + + switch m := m.(type) { + case *ir.AssignStmt: + if lhs, ok := m.X.(*ir.Name); ok && lhs.Defn == &subst.defnMarker { + lhs.Defn = m + } + case *ir.AssignListStmt: + for _, lhs := range m.Lhs { + if lhs, ok := lhs.(*ir.Name); ok && lhs.Defn == &subst.defnMarker { + lhs.Defn = m + } + } + } + return m } diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go index fc995cee620..59914baa5cc 100644 --- a/src/cmd/compile/internal/ir/dump.go +++ b/src/cmd/compile/internal/ir/dump.go @@ -20,12 +20,12 @@ import ( "cmd/internal/src" ) -// dump is like fdump but prints to stderr. +// DumpAny is like FDumpAny but prints to stderr. func DumpAny(root interface{}, filter string, depth int) { FDumpAny(os.Stderr, root, filter, depth) } -// fdump prints the structure of a rooted data structure +// FDumpAny prints the structure of a rooted data structure // to w by depth-first traversal of the data structure. // // The filter parameter is a regular expression. If it is diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index d68bcfe60c4..f70645f0791 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -136,7 +136,7 @@ func (n *BinaryExpr) SetOp(op Op) { panic(n.no("SetOp " + op.String())) case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR, - OCOPY, OCOMPLEX, + OCOPY, OCOMPLEX, OUNSAFEADD, OUNSAFESLICE, OEFACE: n.op = op } @@ -157,12 +157,13 @@ const ( type CallExpr struct { miniExpr origNode - X Node - Args Nodes - KeepAlive []*Name // vars to be kept alive until call returns - IsDDD bool - Use CallUse - NoInline bool + X Node + Args Nodes + KeepAlive []*Name // vars to be kept alive until call returns + IsDDD bool + Use CallUse + NoInline bool + PreserveClosure bool // disable directClosureCall for this call } func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { @@ -276,7 +277,7 @@ func (n *ConvExpr) SetOp(op Op) { switch op { default: panic(n.no("SetOp " + op.String())) - case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR: + case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARRPTR: n.op = op } } @@ -447,14 +448,14 @@ func (n *ParenExpr) SetOTYPE(t *types.Type) { t.SetNod(n) } -// A ResultExpr represents a direct access to a result slot on the stack frame. +// A ResultExpr represents a direct access to a result. type ResultExpr struct { miniExpr - Offset int64 + Index int64 // index of the result expr. 
} -func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr { - n := &ResultExpr{Offset: offset} +func NewResultExpr(pos src.XPos, typ *types.Type, index int64) *ResultExpr { + n := &ResultExpr{Index: index} n.pos = pos n.op = ORESULT n.typ = typ @@ -527,6 +528,13 @@ func (n *SelectorExpr) FuncName() *Name { fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel)) fn.Class = PFUNC fn.SetType(n.Type()) + if n.Selection.Nname != nil { + // TODO(austin): Nname is nil for interface method + // expressions (I.M), so we can't attach a Func to + // those here. reflectdata.methodWrapper generates the + // Func. + fn.Func = n.Selection.Nname.(*Name).Func + } return fn } @@ -1052,7 +1060,7 @@ func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy return rpkg.LookupBytes(b.Bytes()) } -// MethodName returns the ONAME representing the method +// MethodExprName returns the ONAME representing the method // referenced by expression n, which must be a method selector, // method expression, or method value. func MethodExprName(n Node) *Name { @@ -1060,7 +1068,7 @@ func MethodExprName(n Node) *Name { return name } -// MethodFunc is like MethodName, but returns the types.Field instead. +// MethodExprFunc is like MethodExprName, but returns the types.Field instead. func MethodExprFunc(n Node) *types.Field { switch n.Op() { case ODOTMETH, OMETHEXPR, OCALLPART: diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index 1a05079dac8..f2ae0f7606e 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -25,69 +25,71 @@ import ( // Op var OpNames = []string{ - OADDR: "&", - OADD: "+", - OADDSTR: "+", - OALIGNOF: "unsafe.Alignof", - OANDAND: "&&", - OANDNOT: "&^", - OAND: "&", - OAPPEND: "append", - OAS: "=", - OAS2: "=", - OBREAK: "break", - OCALL: "function call", // not actual syntax - OCAP: "cap", - OCASE: "case", - OCLOSE: "close", - OCOMPLEX: "complex", - OBITNOT: "^", - OCONTINUE: "continue", - OCOPY: "copy", - ODELETE: "delete", - ODEFER: "defer", - ODIV: "/", - OEQ: "==", - OFALL: "fallthrough", - OFOR: "for", - OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge.892 - OGE: ">=", - OGOTO: "goto", - OGT: ">", - OIF: "if", - OIMAG: "imag", - OINLMARK: "inlmark", - ODEREF: "*", - OLEN: "len", - OLE: "<=", - OLSH: "<<", - OLT: "<", - OMAKE: "make", - ONEG: "-", - OMOD: "%", - OMUL: "*", - ONEW: "new", - ONE: "!=", - ONOT: "!", - OOFFSETOF: "unsafe.Offsetof", - OOROR: "||", - OOR: "|", - OPANIC: "panic", - OPLUS: "+", - OPRINTN: "println", - OPRINT: "print", - ORANGE: "range", - OREAL: "real", - ORECV: "<-", - ORECOVER: "recover", - ORETURN: "return", - ORSH: ">>", - OSELECT: "select", - OSEND: "<-", - OSIZEOF: "unsafe.Sizeof", - OSUB: "-", - OSWITCH: "switch", - OXOR: "^", + OADDR: "&", + OADD: "+", + OADDSTR: "+", + OALIGNOF: "unsafe.Alignof", + OANDAND: "&&", + OANDNOT: "&^", + OAND: "&", + OAPPEND: "append", + OAS: "=", + OAS2: "=", + OBREAK: "break", + OCALL: "function call", // not actual syntax + OCAP: "cap", + OCASE: "case", + OCLOSE: "close", + OCOMPLEX: "complex", + OBITNOT: "^", + OCONTINUE: "continue", + OCOPY: "copy", + ODELETE: "delete", + ODEFER: "defer", + ODIV: "/", + OEQ: "==", + OFALL: "fallthrough", + OFOR: "for", + OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge.892 + OGE: ">=", + OGOTO: "goto", + OGT: ">", + OIF: "if", + OIMAG: "imag", + OINLMARK: "inlmark", + ODEREF: "*", + OLEN: "len", + 
OLE: "<=", + OLSH: "<<", + OLT: "<", + OMAKE: "make", + ONEG: "-", + OMOD: "%", + OMUL: "*", + ONEW: "new", + ONE: "!=", + ONOT: "!", + OOFFSETOF: "unsafe.Offsetof", + OOROR: "||", + OOR: "|", + OPANIC: "panic", + OPLUS: "+", + OPRINTN: "println", + OPRINT: "print", + ORANGE: "range", + OREAL: "real", + ORECV: "<-", + ORECOVER: "recover", + ORETURN: "return", + ORSH: ">>", + OSELECT: "select", + OSEND: "<-", + OSIZEOF: "unsafe.Sizeof", + OSUB: "-", + OSWITCH: "switch", + OUNSAFEADD: "unsafe.Add", + OUNSAFESLICE: "unsafe.Slice", + OXOR: "^", } // GoString returns the Go syntax for the Op, or else its name. @@ -206,6 +208,7 @@ var OpPrec = []int{ OPRINT: 8, ORUNESTR: 8, OSIZEOF: 8, + OSLICE2ARRPTR: 8, OSTR2BYTES: 8, OSTR2RUNES: 8, OSTRUCTLIT: 8, @@ -217,6 +220,8 @@ var OpPrec = []int{ OTMAP: 8, OTSTRUCT: 8, OTYPE: 8, + OUNSAFEADD: 8, + OUNSAFESLICE: 8, OINDEXMAP: 8, OINDEX: 8, OSLICE: 8, @@ -793,7 +798,7 @@ func exprFmt(n Node, s fmt.State, prec int) { n := n.(*SliceHeaderExpr) fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.Len, n.Cap) - case OCOMPLEX, OCOPY: + case OCOMPLEX, OCOPY, OUNSAFEADD, OUNSAFESLICE: n := n.(*BinaryExpr) fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.X, n.Y) @@ -804,7 +809,8 @@ func exprFmt(n Node, s fmt.State, prec int) { ORUNES2STR, OSTR2BYTES, OSTR2RUNES, - ORUNESTR: + ORUNESTR, + OSLICE2ARRPTR: n := n.(*ConvExpr) if n.Type() == nil || n.Type().Sym() == nil { fmt.Fprintf(s, "(%v)", n.Type()) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 0a9db92d967..20fe965711d 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -93,7 +93,7 @@ type Func struct { FieldTrack map[*obj.LSym]struct{} DebugInfo interface{} - LSym *obj.LSym + LSym *obj.LSym // Linker object in this function's native ABI (Func.ABI) Inl *Inline @@ -109,7 +109,22 @@ type Func struct { Pragma PragmaFlag // go:xxx function annotations - flags bitset16 + flags bitset16 + + // ABI is a function's "definition" ABI. This is the ABI that + // this function's generated code is expecting to be called by. + // + // For most functions, this will be obj.ABIInternal. It may be + // a different ABI for functions defined in assembly or ABI wrappers. + // + // This is included in the export data and tracked across packages. + ABI obj.ABI + // ABIRefs is the set of ABIs by which this function is referenced. + // For ABIs other than this function's definition ABI, the + // compiler generates ABI wrapper functions. This is only tracked + // within a package. + ABIRefs obj.ABISet + NumDefers int32 // number of defer calls in the function NumReturns int32 // number of explicit returns in the function @@ -124,6 +139,9 @@ func NewFunc(pos src.XPos) *Func { f.pos = pos f.op = ODCLFUNC f.Iota = -1 + // Most functions are ABIInternal. The importer or symabis + // pass may override this. + f.ABI = obj.ABIInternal return f } @@ -142,7 +160,10 @@ func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi type Inline struct { Cost int32 // heuristic cost of inlining this function - // Copies of Func.Dcl and Nbody for use during inlining. + // Copies of Func.Dcl and Func.Body for use during inlining. Copies are + // needed because the function's dcl/body may be changed by later compiler + // transformations. These fields are also populated when a function from + // another package is imported. 
Dcl []*Name Body []Node } @@ -162,7 +183,8 @@ type ScopeID int32 const ( funcDupok = 1 << iota // duplicate definitions ok - funcWrapper // is method wrapper + funcWrapper // hide frame from users (elide in tracebacks, don't count as a frame for recover()) + funcABIWrapper // is an ABI wrapper (also set flagWrapper) funcNeedctxt // function uses context register (has closure variables) funcReflectMethod // function calls reflect.Type.Method or MethodByName // true if closure inside a function; false if a simple function or a @@ -184,6 +206,7 @@ type SymAndPos struct { func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 } func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 } +func (f *Func) ABIWrapper() bool { return f.flags&funcABIWrapper != 0 } func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 } func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 } func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 } @@ -197,6 +220,7 @@ func (f *Func) ClosureCalled() bool { return f.flags&funcClosureCalle func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) } func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) } +func (f *Func) SetABIWrapper(b bool) { f.flags.set(funcABIWrapper, b) } func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) } func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) } func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) } @@ -217,7 +241,7 @@ func (f *Func) SetWBPos(pos src.XPos) { } } -// funcname returns the name (without the package) of the function n. +// FuncName returns the name (without the package) of the function n. func FuncName(f *Func) string { if f == nil || f.Nname == nil { return "" @@ -225,7 +249,7 @@ func FuncName(f *Func) string { return f.Sym().Name } -// pkgFuncName returns the name of the function referenced by n, with package prepended. +// PkgFuncName returns the name of the function referenced by n, with package prepended. // This differs from the compiler's internal convention where local functions lack a package // because the ultimate consumer of this is a human looking at an IDE; package is only empty // if the compilation package is actually the empty string. diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go index 326f491a69d..5a0aaadf16f 100644 --- a/src/cmd/compile/internal/ir/mknode.go +++ b/src/cmd/compile/internal/ir/mknode.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore package main diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 6240852aaf3..b6c68bc5e01 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -41,7 +41,7 @@ type Name struct { pragma PragmaFlag // int16 flags bitset16 sym *types.Sym - Func *Func + Func *Func // TODO(austin): nil for I.M, eqFor, hashfor, and hashmem Offset_ int64 val constant.Value Opt interface{} // for use by escape analysis @@ -49,7 +49,9 @@ type Name struct { PkgName *PkgName // real package for import . names // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). - // For a closure var, the ONAME node of the outer captured variable + // For a closure var, the ONAME node of the outer captured variable. 
+ // For the case-local variables of a type switch, the type switch guard (OTYPESW). + // For the name of a function, points to corresponding Func node. Defn Node // The function, method, or closure in which local variable or param is declared. @@ -246,44 +248,47 @@ func (n *Name) Alias() bool { return n.flags&nameAlias != 0 } func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) } const ( - nameReadonly = 1 << iota - nameByval // is the variable captured by value or by reference - nameNeedzero // if it contains pointers, needs to be zeroed on function entry - nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap) - nameUsed // for variable declared and not used error - nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original (if any) at n.Defn - nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy - nameAddrtaken // address taken, even if not moved to heap - nameInlFormal // PAUTO created by inliner, derived from callee formal - nameInlLocal // PAUTO created by inliner, derived from callee local - nameOpenDeferSlot // if temporary var storing info for open-coded defers - nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section - nameAlias // is type name an alias + nameReadonly = 1 << iota + nameByval // is the variable captured by value or by reference + nameNeedzero // if it contains pointers, needs to be zeroed on function entry + nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap) + nameUsed // for variable declared and not used error + nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original (if any) at n.Defn + nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy + nameIsOutputParamInRegisters // output parameter in registers spills as an auto + nameAddrtaken // address taken, even if not moved to heap + nameInlFormal // PAUTO created by inliner, derived from callee formal + nameInlLocal // PAUTO created by inliner, derived from callee local + nameOpenDeferSlot // if temporary var storing info for open-coded defers + nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section + nameAlias // is type name an alias ) -func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 } -func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 } -func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 } -func (n *Name) Used() bool { return n.flags&nameUsed != 0 } -func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } -func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } -func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 } -func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } -func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } -func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } -func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 } +func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 } +func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 } +func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 } +func (n *Name) Used() bool { return n.flags&nameUsed != 0 } +func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } +func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } +func (n 
*Name) IsOutputParamInRegisters() bool { return n.flags&nameIsOutputParamInRegisters != 0 } +func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 } +func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } +func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } +func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } +func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 } -func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) } -func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) } -func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) } -func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } -func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) } -func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } -func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } -func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } -func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } -func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } -func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } +func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) } +func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) } +func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) } +func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } +func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) } +func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } +func (n *Name) SetIsOutputParamInRegisters(b bool) { n.flags.set(nameIsOutputParamInRegisters, b) } +func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } +func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } +func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } +func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } +func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) } // OnStack reports whether variable n may reside on the stack. func (n *Name) OnStack() bool { @@ -398,7 +403,7 @@ func FinishCaptureNames(pos src.XPos, outerfn, fn *Func) { // unhook them. // make the list of pointers for the closure call. for _, cv := range fn.ClosureVars { - // Unlink from n; see comment in syntax.go type Param for these fields. + // Unlink from n; see comment above on type Name for these fields. 
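As a side note on the nameByval flag listed above ("is the variable captured by value or by reference"): a closure variable that is only read is a candidate for capture by value, while one that is assigned must be captured by reference. A user-level illustration, editor-supplied and deliberately oversimplified:

```go
package main

import "fmt"

func main() {
	x, y := 1, 2

	// x is only read after capture, so the compiler may capture it
	// by value; y is assigned inside the closure, so it must be
	// captured by reference to keep both views consistent.
	read := func() int { return x }
	bump := func() { y++ }

	bump()
	fmt.Println(read(), y) // 1 3
}
```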
n := cv.Defn.(*Name) n.Innermost = cv.Outer @@ -509,5 +514,3 @@ func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName { p.pos = pos return p } - -var RegFP *Name diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 59643713fa6..af559cc0820 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -48,6 +48,12 @@ type Node interface { SetEsc(x uint16) Diag() bool SetDiag(x bool) + + // Typecheck values: + // 0 means the node is not typechecked + // 1 means the node is completely typechecked + // 2 means typechecking of the node is in progress + // 3 means the node has its type from types2, but may need transformation Typecheck() uint8 SetTypecheck(x uint8) NonNil() bool @@ -74,7 +80,7 @@ func IsAutoTmp(n Node) bool { return n.Name().AutoTemp() } -// mayBeShared reports whether n may occur in multiple places in the AST. +// MayBeShared reports whether n may occur in multiple places in the AST. // Extra care must be taken when mutating such a node. func MayBeShared(n Node) bool { switch n.Op() { @@ -117,176 +123,176 @@ const ( ONIL // nil // expressions - OADD // Left + Right - OSUB // Left - Right - OOR // Left | Right - OXOR // Left ^ Right + OADD // X + Y + OSUB // X - Y + OOR // X | Y + OXOR // X ^ Y OADDSTR // +{List} (string addition, list elements are strings) - OADDR // &Left - OANDAND // Left && Right - OAPPEND // append(List); after walk, Left may contain elem type descriptor - OBYTES2STR // Type(Left) (Type is string, Left is a []byte) - OBYTES2STRTMP // Type(Left) (Type is string, Left is a []byte, ephemeral) - ORUNES2STR // Type(Left) (Type is string, Left is a []rune) - OSTR2BYTES // Type(Left) (Type is []byte, Left is a string) - OSTR2BYTESTMP // Type(Left) (Type is []byte, Left is a string, ephemeral) - OSTR2RUNES // Type(Left) (Type is []rune, Left is a string) - // Left = Right or (if Colas=true) Left := Right - // If Colas, then Ninit includes a DCL node for Left. + OADDR // &X + OANDAND // X && Y + OAPPEND // append(Args); after walk, X may contain elem type descriptor + OBYTES2STR // Type(X) (Type is string, X is a []byte) + OBYTES2STRTMP // Type(X) (Type is string, X is a []byte, ephemeral) + ORUNES2STR // Type(X) (Type is string, X is a []rune) + OSTR2BYTES // Type(X) (Type is []byte, X is a string) + OSTR2BYTESTMP // Type(X) (Type is []byte, X is a string, ephemeral) + OSTR2RUNES // Type(X) (Type is []rune, X is a string) + OSLICE2ARRPTR // Type(X) (Type is *[N]T, X is a []T) + // X = Y or (if Def=true) X := Y + // If Def, then Init includes a DCL node for X. OAS - // List = Rlist (x, y, z = a, b, c) or (if Colas=true) List := Rlist - // If Colas, then Ninit includes DCL nodes for List + // Lhs = Rhs (x, y, z = a, b, c) or (if Def=true) Lhs := Rhs + // If Def, then Init includes DCL nodes for Lhs OAS2 - OAS2DOTTYPE // List = Right (x, ok = I.(int)) - OAS2FUNC // List = Right (x, y = f()) - OAS2MAPR // List = Right (x, ok = m["foo"]) - OAS2RECV // List = Right (x, ok = <-c) - OASOP // Left Etype= Right (x += y) - OCALL // Left(List) (function call, method call or type conversion) + OAS2DOTTYPE // Lhs = Rhs (x, ok = I.(int)) + OAS2FUNC // Lhs = Rhs (x, y = f()) + OAS2MAPR // Lhs = Rhs (x, ok = m["foo"]) + OAS2RECV // Lhs = Rhs (x, ok = <-c) + OASOP // X AsOp= Y (x += y) + OCALL // X(Args) (function call, method call or type conversion) // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure. - // Prior to walk, they are: Left(List), where List is all regular arguments. 
- // After walk, List is a series of assignments to temporaries,
- // and Rlist is an updated set of arguments.
- // Nbody is all OVARLIVE nodes that are attached to OCALLxxx.
- // TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
- OCALLFUNC // Left(List/Rlist) (function call f(args))
- OCALLMETH // Left(List/Rlist) (direct method call x.Method(args))
- OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
- OCALLPART // Left.Right (method expression x.Method, not called)
- OCAP // cap(Left)
- OCLOSE // close(Left)
- OCLOSURE // func Type { Func.Closure.Nbody } (func literal)
- OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
+ // Prior to walk, they are: X(Args), where Args is all regular arguments.
+ // After walk, if the evaluation of any argument requires a temporary variable,
+ // that temporary variable is pushed to Init, and Args contains an updated
+ // set of arguments. KeepAlive is all OVARLIVE nodes that are attached to OCALLxxx.
+ OCALLFUNC // X(Args) (function call f(args))
+ OCALLMETH // X(Args) (direct method call x.Method(args))
+ OCALLINTER // X(Args) (interface method call x.Method(args))
+ OCALLPART // X.Sel (method expression x.Method, not called)
+ OCAP // cap(X)
+ OCLOSE // close(X)
+ OCLOSURE // func Type { Func.Closure.Body } (func literal)
+ OCOMPLIT // Type{List} (composite literal, not yet lowered to specific form)
OMAPLIT // Type{List} (composite literal, Type is map)
OSTRUCTLIT // Type{List} (composite literal, Type is struct)
OARRAYLIT // Type{List} (composite literal, Type is array)
- OSLICELIT // Type{List} (composite literal, Type is slice) Right.Int64() = slice length.
- OPTRLIT // &Left (left is composite literal)
- OCONV // Type(Left) (type conversion)
- OCONVIFACE // Type(Left) (type conversion, to interface)
- OCONVNOP // Type(Left) (type conversion, no effect)
- OCOPY // copy(Left, Right)
- ODCL // var Left (declares Left of type Left.Type)
+ OSLICELIT // Type{List} (composite literal, Type is slice), Len is slice length.
+ OPTRLIT // &X (X is composite literal)
+ OCONV // Type(X) (type conversion)
+ OCONVIFACE // Type(X) (type conversion, to interface)
+ OCONVNOP // Type(X) (type conversion, no effect)
+ OCOPY // copy(X, Y)
+ ODCL // var X (declares X of type X.Type)
// Used during parsing but don't last.
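A few entries below this point, the Op list gains OUNSAFEADD and OUNSAFESLICE for the unsafe.Add and unsafe.Slice builtins this patch threads through the compiler; the escape-analysis hunk earlier in the CL routes their input pointer directly to the result. A short usage sketch (editor-supplied, requires Go 1.17+; the Op list resumes below):

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	a := [4]int{10, 20, 30, 40}

	p := unsafe.Pointer(&a[0])
	// unsafe.Add(X, Y): advance p by two elements.
	q := (*int)(unsafe.Add(p, 2*unsafe.Sizeof(a[0])))
	fmt.Println(*q) // 30

	// unsafe.Slice(X, Y): view the array as a []int of length 4.
	s := unsafe.Slice(&a[0], 4)
	fmt.Println(s) // [10 20 30 40]
}
```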
ODCLFUNC // func f() or func (r) f() ODCLCONST // const pi = 3.14 ODCLTYPE // type Int int or type Int = int - ODELETE // delete(List) - ODOT // Left.Sym (Left is of struct type) - ODOTPTR // Left.Sym (Left is of pointer to struct type) - ODOTMETH // Left.Sym (Left is non-interface, Right is method name) - ODOTINTER // Left.Sym (Left is interface, Right is method name) - OXDOT // Left.Sym (before rewrite to one of the preceding) - ODOTTYPE // Left.Right or Left.Type (.Right during parsing, .Type once resolved); after walk, .Right contains address of interface type descriptor and .Right.Right contains address of concrete type descriptor - ODOTTYPE2 // Left.Right or Left.Type (.Right during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, .Right contains address of interface type descriptor - OEQ // Left == Right - ONE // Left != Right - OLT // Left < Right - OLE // Left <= Right - OGE // Left >= Right - OGT // Left > Right - ODEREF // *Left - OINDEX // Left[Right] (index of array or slice) - OINDEXMAP // Left[Right] (index of map) - OKEY // Left:Right (key:value in struct/array/map literal) - OSTRUCTKEY // Sym:Left (key:value in struct literal, after type checking) - OLEN // len(Left) - OMAKE // make(List) (before type checking converts to one of the following) - OMAKECHAN // make(Type, Left) (type is chan) - OMAKEMAP // make(Type, Left) (type is map) - OMAKESLICE // make(Type, Left, Right) (type is slice) - OMAKESLICECOPY // makeslicecopy(Type, Left, Right) (type is slice; Left is length and Right is the copied from slice) + ODELETE // delete(Args) + ODOT // X.Sel (X is of struct type) + ODOTPTR // X.Sel (X is of pointer to struct type) + ODOTMETH // X.Sel (X is non-interface, Sel is method name) + ODOTINTER // X.Sel (X is interface, Sel is method name) + OXDOT // X.Sel (before rewrite to one of the preceding) + ODOTTYPE // X.Ntype or X.Type (.Ntype during parsing, .Type once resolved); after walk, Itab contains address of interface type descriptor and Itab.X contains address of concrete type descriptor + ODOTTYPE2 // X.Ntype or X.Type (.Ntype during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, Itab contains address of interface type descriptor + OEQ // X == Y + ONE // X != Y + OLT // X < Y + OLE // X <= Y + OGE // X >= Y + OGT // X > Y + ODEREF // *X + OINDEX // X[Index] (index of array or slice) + OINDEXMAP // X[Index] (index of map) + OKEY // Key:Value (key:value in struct/array/map literal) + OSTRUCTKEY // Field:Value (key:value in struct literal, after type checking) + OLEN // len(X) + OMAKE // make(Args) (before type checking converts to one of the following) + OMAKECHAN // make(Type[, Len]) (type is chan) + OMAKEMAP // make(Type[, Len]) (type is map) + OMAKESLICE // make(Type[, Len[, Cap]]) (type is slice) + OMAKESLICECOPY // makeslicecopy(Type, Len, Cap) (type is slice; Len is length and Cap is the copied from slice) // OMAKESLICECOPY is created by the order pass and corresponds to: - // s = make(Type, Left); copy(s, Right) + // s = make(Type, Len); copy(s, Cap) // - // Bounded can be set on the node when Left == len(Right) is known at compile time. + // Bounded can be set on the node when Len == len(Cap) is known at compile time. // // This node is created so the walk pass can optimize this pattern which would // otherwise be hard to detect after the order pass. 
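The source-level pattern that the OMAKESLICECOPY comment above describes, written out plainly (an editor-supplied illustration, not compiler code):

```go
package main

import "fmt"

func main() {
	src := []int{1, 2, 3}

	// These two statements are the pattern the order pass rewrites
	// into a single OMAKESLICECOPY node: s = make(Type, Len); copy(s, src).
	// When Len == len(src) is known at compile time, the node can
	// additionally be marked Bounded.
	s := make([]int, len(src))
	copy(s, src)

	fmt.Println(s) // [1 2 3]
}
```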
- OMUL // Left * Right
- ODIV // Left / Right
- OMOD // Left % Right
- OLSH // Left << Right
- ORSH // Left >> Right
- OAND // Left & Right
- OANDNOT // Left &^ Right
- ONEW // new(Left); corresponds to calls to new in source code
- ONOT // !Left
- OBITNOT // ^Left
- OPLUS // +Left
- ONEG // -Left
- OOROR // Left || Right
- OPANIC // panic(Left)
+ OMUL // X * Y
+ ODIV // X / Y
+ OMOD // X % Y
+ OLSH // X << Y
+ ORSH // X >> Y
+ OAND // X & Y
+ OANDNOT // X &^ Y
+ ONEW // new(X); corresponds to calls to new in source code
+ ONOT // !X
+ OBITNOT // ^X
+ OPLUS // +X
+ ONEG // -X
+ OOROR // X || Y
+ OPANIC // panic(X)
OPRINT // print(List)
OPRINTN // println(List)
- OPAREN // (Left)
- OSEND // Left <- Right
- OSLICE // Left[List[0] : List[1]] (Left is untypechecked or slice)
- OSLICEARR // Left[List[0] : List[1]] (Left is pointer to array)
- OSLICESTR // Left[List[0] : List[1]] (Left is string)
- OSLICE3 // Left[List[0] : List[1] : List[2]] (Left is untypedchecked or slice)
- OSLICE3ARR // Left[List[0] : List[1] : List[2]] (Left is pointer to array)
- OSLICEHEADER // sliceheader{Left, List[0], List[1]} (Left is unsafe.Pointer, List[0] is length, List[1] is capacity)
+ OPAREN // (X)
+ OSEND // Chan <- Value
+ OSLICE // X[Low : High] (X is untypechecked or slice)
+ OSLICEARR // X[Low : High] (X is pointer to array)
+ OSLICESTR // X[Low : High] (X is string)
+ OSLICE3 // X[Low : High : Max] (X is untypechecked or slice)
+ OSLICE3ARR // X[Low : High : Max] (X is pointer to array)
+ OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity)
ORECOVER // recover()
- ORECV // <-Left
- ORUNESTR // Type(Left) (Type is string, Left is rune)
- OSELRECV2 // like OAS2: List = Rlist where len(List)=2, len(Rlist)=1, Rlist[0].Op = ORECV (appears as .Left of OCASE)
+ ORECV // <-X
+ ORUNESTR // Type(X) (Type is string, X is rune)
+ OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE)
OIOTA // iota
- OREAL // real(Left)
- OIMAG // imag(Left)
- OCOMPLEX // complex(Left, Right) or complex(List[0]) where List[0] is a 2-result function call
- OALIGNOF // unsafe.Alignof(Left)
- OOFFSETOF // unsafe.Offsetof(Left)
- OSIZEOF // unsafe.Sizeof(Left)
+ OREAL // real(X)
+ OIMAG // imag(X)
+ OCOMPLEX // complex(X, Y)
+ OALIGNOF // unsafe.Alignof(X)
+ OOFFSETOF // unsafe.Offsetof(X)
+ OSIZEOF // unsafe.Sizeof(X)
+ OUNSAFEADD // unsafe.Add(X, Y)
+ OUNSAFESLICE // unsafe.Slice(X, Y)
OMETHEXPR // method expression
- OSTMTEXPR // statement expression (Init; Left)
// statements
OBLOCK // { List } (block of code)
- OBREAK // break [Sym]
- // OCASE: case List: Nbody (List==nil means default)
+ OBREAK // break [Label]
+ // OCASE: case List: Body (List==nil means default)
// For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
// for nil), and, if a type-switch variable is specified, Rlist is an
// ONAME for the version of the type-switch variable with the specified
// type.
OCASE - OCONTINUE // continue [Sym] - ODEFER // defer Left (Left must be call) + OCONTINUE // continue [Label] + ODEFER // defer Call OFALL // fallthrough - OFOR // for Ninit; Left; Right { Nbody } - // OFORUNTIL is like OFOR, but the test (Left) is applied after the body: - // Ninit - // top: { Nbody } // Execute the body at least once - // cont: Right - // if Left { // And then test the loop condition + OFOR // for Init; Cond; Post { Body } + // OFORUNTIL is like OFOR, but the test (Cond) is applied after the body: + // Init + // top: { Body } // Execute the body at least once + // cont: Post + // if Cond { // And then test the loop condition // List // Before looping to top, execute List // goto top // } // OFORUNTIL is created by walk. There's no way to write this in Go code. OFORUNTIL - OGOTO // goto Sym - OIF // if Ninit; Left { Nbody } else { Rlist } - OLABEL // Sym: - OGO // go Left (Left must be call) - ORANGE // for List = range Right { Nbody } - ORETURN // return List - OSELECT // select { List } (List is list of OCASE) - OSWITCH // switch Ninit; Left { List } (List is a list of OCASE) - // OTYPESW: Left := Right.(type) (appears as .Left of OSWITCH) - // Left is nil if there is no type-switch variable + OGOTO // goto Label + OIF // if Init; Cond { Then } else { Else } + OLABEL // Label: + OGO // go Call + ORANGE // for Key, Value = range X { Body } + ORETURN // return Results + OSELECT // select { Cases } + OSWITCH // switch Init; Expr { Cases } + // OTYPESW: X := Y.(type) (appears as .Tag of OSWITCH) + // X is nil if there is no type-switch variable OTYPESW OFUNCINST // instantiation of a generic function - OTYPEINST // instantiation of a generic type // types OTCHAN // chan int OTMAP // map[string]int OTSTRUCT // struct{} OTINTER // interface{} - // OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is + // OTFUNC: func() - Recv is receiver field, Params is list of param fields, Results is // list of result fields. OTFUNC OTARRAY // [8]int or [...]int @@ -300,7 +306,7 @@ const ( OINLCALL // intermediary representation of an inlined call. OEFACE // itable and data words of an empty-interface value. OITAB // itable word of an interface value. - OIDATA // data word of an interface value in Left + OIDATA // data word of an interface value in X OSPTR // base pointer of a slice or string. OCFUNC // reference to c function pointer (not go func value) OCHECKNIL // emit code to ensure pointer/interface not nil @@ -456,7 +462,7 @@ const ( // Go command pragmas GoBuildPragma - RegisterParams // TODO remove after register abi is working + RegisterParams // TODO(register args) remove after register abi is working ) @@ -473,7 +479,7 @@ func IsConst(n Node, ct constant.Kind) bool { return ConstType(n) == ct } -// isNil reports whether n represents the universal untyped zero value "nil". +// IsNil reports whether n represents the universal untyped zero value "nil". func IsNil(n Node) bool { // Check n.Orig because constant propagation may produce typed nil constants, // which don't exist in the Go spec. @@ -533,7 +539,7 @@ func SetPos(n Node) src.XPos { } // The result of InitExpr MUST be assigned back to n, e.g. 
-// n.Left = InitExpr(init, n.Left) +// n.X = InitExpr(init, n.X) func InitExpr(init []Node, expr Node) Node { if len(init) == 0 { return expr diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index 65456df356d..405a0c6b3c9 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -29,144 +29,145 @@ func _() { _ = x[OSTR2BYTES-18] _ = x[OSTR2BYTESTMP-19] _ = x[OSTR2RUNES-20] - _ = x[OAS-21] - _ = x[OAS2-22] - _ = x[OAS2DOTTYPE-23] - _ = x[OAS2FUNC-24] - _ = x[OAS2MAPR-25] - _ = x[OAS2RECV-26] - _ = x[OASOP-27] - _ = x[OCALL-28] - _ = x[OCALLFUNC-29] - _ = x[OCALLMETH-30] - _ = x[OCALLINTER-31] - _ = x[OCALLPART-32] - _ = x[OCAP-33] - _ = x[OCLOSE-34] - _ = x[OCLOSURE-35] - _ = x[OCOMPLIT-36] - _ = x[OMAPLIT-37] - _ = x[OSTRUCTLIT-38] - _ = x[OARRAYLIT-39] - _ = x[OSLICELIT-40] - _ = x[OPTRLIT-41] - _ = x[OCONV-42] - _ = x[OCONVIFACE-43] - _ = x[OCONVNOP-44] - _ = x[OCOPY-45] - _ = x[ODCL-46] - _ = x[ODCLFUNC-47] - _ = x[ODCLCONST-48] - _ = x[ODCLTYPE-49] - _ = x[ODELETE-50] - _ = x[ODOT-51] - _ = x[ODOTPTR-52] - _ = x[ODOTMETH-53] - _ = x[ODOTINTER-54] - _ = x[OXDOT-55] - _ = x[ODOTTYPE-56] - _ = x[ODOTTYPE2-57] - _ = x[OEQ-58] - _ = x[ONE-59] - _ = x[OLT-60] - _ = x[OLE-61] - _ = x[OGE-62] - _ = x[OGT-63] - _ = x[ODEREF-64] - _ = x[OINDEX-65] - _ = x[OINDEXMAP-66] - _ = x[OKEY-67] - _ = x[OSTRUCTKEY-68] - _ = x[OLEN-69] - _ = x[OMAKE-70] - _ = x[OMAKECHAN-71] - _ = x[OMAKEMAP-72] - _ = x[OMAKESLICE-73] - _ = x[OMAKESLICECOPY-74] - _ = x[OMUL-75] - _ = x[ODIV-76] - _ = x[OMOD-77] - _ = x[OLSH-78] - _ = x[ORSH-79] - _ = x[OAND-80] - _ = x[OANDNOT-81] - _ = x[ONEW-82] - _ = x[ONOT-83] - _ = x[OBITNOT-84] - _ = x[OPLUS-85] - _ = x[ONEG-86] - _ = x[OOROR-87] - _ = x[OPANIC-88] - _ = x[OPRINT-89] - _ = x[OPRINTN-90] - _ = x[OPAREN-91] - _ = x[OSEND-92] - _ = x[OSLICE-93] - _ = x[OSLICEARR-94] - _ = x[OSLICESTR-95] - _ = x[OSLICE3-96] - _ = x[OSLICE3ARR-97] - _ = x[OSLICEHEADER-98] - _ = x[ORECOVER-99] - _ = x[ORECV-100] - _ = x[ORUNESTR-101] - _ = x[OSELRECV2-102] - _ = x[OIOTA-103] - _ = x[OREAL-104] - _ = x[OIMAG-105] - _ = x[OCOMPLEX-106] - _ = x[OALIGNOF-107] - _ = x[OOFFSETOF-108] - _ = x[OSIZEOF-109] - _ = x[OMETHEXPR-110] - _ = x[OSTMTEXPR-111] - _ = x[OBLOCK-112] - _ = x[OBREAK-113] - _ = x[OCASE-114] - _ = x[OCONTINUE-115] - _ = x[ODEFER-116] - _ = x[OFALL-117] - _ = x[OFOR-118] - _ = x[OFORUNTIL-119] - _ = x[OGOTO-120] - _ = x[OIF-121] - _ = x[OLABEL-122] - _ = x[OGO-123] - _ = x[ORANGE-124] - _ = x[ORETURN-125] - _ = x[OSELECT-126] - _ = x[OSWITCH-127] - _ = x[OTYPESW-128] - _ = x[OFUNCINST-129] - _ = x[OTYPEINST-130] - _ = x[OTCHAN-131] - _ = x[OTMAP-132] - _ = x[OTSTRUCT-133] - _ = x[OTINTER-134] - _ = x[OTFUNC-135] - _ = x[OTARRAY-136] - _ = x[OTSLICE-137] - _ = x[OINLCALL-138] - _ = x[OEFACE-139] - _ = x[OITAB-140] - _ = x[OIDATA-141] - _ = x[OSPTR-142] - _ = x[OCFUNC-143] - _ = x[OCHECKNIL-144] - _ = x[OVARDEF-145] - _ = x[OVARKILL-146] - _ = x[OVARLIVE-147] - _ = x[ORESULT-148] - _ = x[OINLMARK-149] - _ = x[OLINKSYMOFFSET-150] - _ = x[OTAILCALL-151] - _ = x[OGETG-152] - _ = x[OEND-153] + _ = x[OSLICE2ARRPTR-21] + _ = x[OAS-22] + _ = x[OAS2-23] + _ = x[OAS2DOTTYPE-24] + _ = x[OAS2FUNC-25] + _ = x[OAS2MAPR-26] + _ = x[OAS2RECV-27] + _ = x[OASOP-28] + _ = x[OCALL-29] + _ = x[OCALLFUNC-30] + _ = x[OCALLMETH-31] + _ = x[OCALLINTER-32] + _ = x[OCALLPART-33] + _ = x[OCAP-34] + _ = x[OCLOSE-35] + _ = x[OCLOSURE-36] + _ = x[OCOMPLIT-37] + _ = x[OMAPLIT-38] + _ = x[OSTRUCTLIT-39] + _ = 
x[OARRAYLIT-40] + _ = x[OSLICELIT-41] + _ = x[OPTRLIT-42] + _ = x[OCONV-43] + _ = x[OCONVIFACE-44] + _ = x[OCONVNOP-45] + _ = x[OCOPY-46] + _ = x[ODCL-47] + _ = x[ODCLFUNC-48] + _ = x[ODCLCONST-49] + _ = x[ODCLTYPE-50] + _ = x[ODELETE-51] + _ = x[ODOT-52] + _ = x[ODOTPTR-53] + _ = x[ODOTMETH-54] + _ = x[ODOTINTER-55] + _ = x[OXDOT-56] + _ = x[ODOTTYPE-57] + _ = x[ODOTTYPE2-58] + _ = x[OEQ-59] + _ = x[ONE-60] + _ = x[OLT-61] + _ = x[OLE-62] + _ = x[OGE-63] + _ = x[OGT-64] + _ = x[ODEREF-65] + _ = x[OINDEX-66] + _ = x[OINDEXMAP-67] + _ = x[OKEY-68] + _ = x[OSTRUCTKEY-69] + _ = x[OLEN-70] + _ = x[OMAKE-71] + _ = x[OMAKECHAN-72] + _ = x[OMAKEMAP-73] + _ = x[OMAKESLICE-74] + _ = x[OMAKESLICECOPY-75] + _ = x[OMUL-76] + _ = x[ODIV-77] + _ = x[OMOD-78] + _ = x[OLSH-79] + _ = x[ORSH-80] + _ = x[OAND-81] + _ = x[OANDNOT-82] + _ = x[ONEW-83] + _ = x[ONOT-84] + _ = x[OBITNOT-85] + _ = x[OPLUS-86] + _ = x[ONEG-87] + _ = x[OOROR-88] + _ = x[OPANIC-89] + _ = x[OPRINT-90] + _ = x[OPRINTN-91] + _ = x[OPAREN-92] + _ = x[OSEND-93] + _ = x[OSLICE-94] + _ = x[OSLICEARR-95] + _ = x[OSLICESTR-96] + _ = x[OSLICE3-97] + _ = x[OSLICE3ARR-98] + _ = x[OSLICEHEADER-99] + _ = x[ORECOVER-100] + _ = x[ORECV-101] + _ = x[ORUNESTR-102] + _ = x[OSELRECV2-103] + _ = x[OIOTA-104] + _ = x[OREAL-105] + _ = x[OIMAG-106] + _ = x[OCOMPLEX-107] + _ = x[OALIGNOF-108] + _ = x[OOFFSETOF-109] + _ = x[OSIZEOF-110] + _ = x[OUNSAFEADD-111] + _ = x[OUNSAFESLICE-112] + _ = x[OMETHEXPR-113] + _ = x[OBLOCK-114] + _ = x[OBREAK-115] + _ = x[OCASE-116] + _ = x[OCONTINUE-117] + _ = x[ODEFER-118] + _ = x[OFALL-119] + _ = x[OFOR-120] + _ = x[OFORUNTIL-121] + _ = x[OGOTO-122] + _ = x[OIF-123] + _ = x[OLABEL-124] + _ = x[OGO-125] + _ = x[ORANGE-126] + _ = x[ORETURN-127] + _ = x[OSELECT-128] + _ = x[OSWITCH-129] + _ = x[OTYPESW-130] + _ = x[OFUNCINST-131] + _ = x[OTCHAN-132] + _ = x[OTMAP-133] + _ = x[OTSTRUCT-134] + _ = x[OTINTER-135] + _ = x[OTFUNC-136] + _ = x[OTARRAY-137] + _ = x[OTSLICE-138] + _ = x[OINLCALL-139] + _ = x[OEFACE-140] + _ = x[OITAB-141] + _ = x[OIDATA-142] + _ = x[OSPTR-143] + _ = x[OCFUNC-144] + _ = x[OCHECKNIL-145] + _ = x[OVARDEF-146] + _ = x[OVARKILL-147] + _ = x[OVARLIVE-148] + _ = x[ORESULT-149] + _ = x[OINLMARK-150] + _ = x[OLINKSYMOFFSET-151] + _ = x[OTAILCALL-152] + _ = x[OGETG-153] + _ = x[OEND-154] } -const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTYPEINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND" +const _Op_name = 
"XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 737, 745, 750, 754, 761, 767, 772, 778, 784, 791, 796, 800, 805, 809, 814, 822, 828, 835, 842, 848, 855, 868, 876, 880, 883} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 213, 216, 221, 228, 235, 241, 250, 258, 266, 272, 276, 285, 292, 296, 299, 306, 314, 321, 327, 330, 336, 343, 351, 355, 362, 370, 372, 374, 376, 378, 380, 382, 387, 392, 400, 403, 412, 415, 419, 427, 434, 443, 456, 459, 462, 465, 468, 471, 474, 480, 483, 486, 492, 496, 499, 503, 508, 513, 519, 524, 528, 533, 541, 549, 555, 564, 575, 582, 586, 593, 601, 605, 609, 613, 620, 627, 635, 641, 650, 661, 669, 674, 679, 683, 691, 696, 700, 703, 711, 715, 717, 722, 724, 729, 735, 741, 747, 753, 761, 766, 770, 777, 783, 788, 794, 800, 807, 812, 816, 821, 825, 830, 838, 844, 851, 858, 864, 871, 884, 892, 896, 899} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index d8c1518b90f..a4421fcf531 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 188, 328}, + {Func{}, 192, 328}, {Name{}, 112, 200}, } diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index c304867e1d9..8115012f978 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -224,7 +224,7 @@ func (n *ForStmt) SetOp(op Op) { // A GoDeferStmt is a go or defer statement: go Call / defer Call. 
// -// The two opcodes use a signle syntax because the implementations +// The two opcodes use a single syntax because the implementations // are very similar: both are concerned with saving Call and running it // in a different context (a separate goroutine or a later time). type GoDeferStmt struct { diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go index ff45f31074e..03c320e205d 100644 --- a/src/cmd/compile/internal/ir/val.go +++ b/src/cmd/compile/internal/ir/val.go @@ -19,7 +19,7 @@ func ConstType(n Node) constant.Kind { return n.Val().Kind() } -// ValueInterface returns the constant value stored in n as an interface{}. +// ConstValue returns the constant value stored in n as an interface{}. // It returns int64s for ints and runes, float64s for floats, // and complex128s for complex values. func ConstValue(n Node) interface{} { @@ -40,7 +40,7 @@ func ConstValue(n Node) interface{} { } } -// int64Val returns v converted to int64. +// IntVal returns v converted to int64. // Note: if t is uint64, very large values will be converted to negative int64. func IntVal(t *types.Type, v constant.Value) int64 { if t.IsUnsigned() { @@ -90,7 +90,7 @@ func ValidTypeForConst(t *types.Type, v constant.Value) bool { panic("unreachable") } -// nodlit returns a new untyped constant with value v. +// NewLiteral returns a new untyped constant with value v. func NewLiteral(v constant.Value) Node { return NewBasicLit(base.Pos, v) } diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go index c1b3d4ed950..e4aeae35220 100644 --- a/src/cmd/compile/internal/ir/visit.go +++ b/src/cmd/compile/internal/ir/visit.go @@ -25,10 +25,10 @@ package ir // // var do func(ir.Node) bool // do = func(x ir.Node) bool { -// ... processing BEFORE visting children ... +// ... processing BEFORE visiting children ... // if ... should visit children ... { // ir.DoChildren(x, do) -// ... processing AFTER visting children ... +// ... processing AFTER visiting children ... // } // if ... should stop parent DoChildren call from visiting siblings ... { // return true @@ -43,11 +43,11 @@ package ir // // var do func(ir.Node) bool // do = func(x ir.Node) bool { -// ... processing BEFORE visting children ... +// ... processing BEFORE visiting children ... // if ... should visit children ... { // ir.DoChildren(x, do) // } -// ... processing AFTER visting children ... +// ... processing AFTER visiting children ... // return false // } // do(root) diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index 53ae797fce1..f5c2ef7709e 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -16,10 +16,13 @@ package liveness import ( "crypto/md5" + "crypto/sha1" "fmt" + "os" "sort" "strings" + "cmd/compile/internal/abi" "cmd/compile/internal/base" "cmd/compile/internal/bitvec" "cmd/compile/internal/ir" @@ -30,6 +33,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" + "cmd/internal/src" ) // OpVarDef is an annotation for the liveness analysis, marking a place @@ -123,9 +127,9 @@ type liveness struct { unsafePoints bitvec.BitVec // An array with a bit vector for each safe point in the - // current Block during Liveness.epilogue. Indexed in Value + // current Block during liveness.epilogue. Indexed in Value // order for that block. Additionally, for the entry block - // livevars[0] is the entry bitmap. 
Liveness.compact moves + // livevars[0] is the entry bitmap. liveness.compact moves // these to stackMaps. livevars []bitvec.BitVec @@ -136,6 +140,14 @@ type liveness struct { stackMaps []bitvec.BitVec cache progeffectscache + + // partLiveArgs includes input arguments (PPARAM) that may + // be partially live. That is, it is considered live because + // a part of it is used, but we may not initialize all parts. + partLiveArgs map[*ir.Name]bool + + doClobber bool // Whether to clobber dead stack slots in this function. + noClobberArgs bool // Do not clobber function arguments } // Map maps from *ssa.Value to LivenessIndex. @@ -262,6 +274,12 @@ func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { } } + if n.Class == ir.PPARAM && !n.Addrtaken() && n.Type().Width > int64(types.PtrSize) { + // Only aggregate-typed arguments that are not address-taken can be + // partially live. + lv.partLiveArgs[n] = true + } + var effect liveEffect // Read is a read, obviously. // @@ -297,6 +315,22 @@ func affectedVar(v *ssa.Value) (*ir.Name, ssa.SymEffect) { n, _ := ssa.AutoVar(v) return n, ssa.SymWrite + case ssa.OpArgIntReg: + // This forces the spill slot for the register to be live at function entry. + // one of the following holds for a function F with pointer-valued register arg X: + // 0. No GC (so an uninitialized spill slot is okay) + // 1. GC at entry of F. GC is precise, but the spills around morestack initialize X's spill slot + // 2. Stack growth at entry of F. Same as GC. + // 3. GC occurs within F itself. This has to be from preemption, and thus GC is conservative. + // a. X is in a register -- then X is seen, and the spill slot is also scanned conservatively. + // b. X is spilled -- the spill slot is initialized, and scanned conservatively + // c. X is not live -- the spill slot is scanned conservatively, and it may contain X from an earlier spill. + // 4. GC within G, transitively called from F + // a. X is live at call site, therefore is spilled, to its spill slot (which is live because of subsequent LoadReg). + // b. X is not live at call site -- but neither is its spill slot. + n, _ := ssa.AutoVar(v) + return n, ssa.SymRead + case ssa.OpVarLive: return v.Aux.(*ir.Name), ssa.SymRead case ssa.OpVarDef, ssa.OpVarKill: @@ -371,6 +405,11 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int lv.livenessMap.reset() lv.markUnsafePoints() + + lv.partLiveArgs = make(map[*ir.Name]bool) + + lv.enableClobber() + return lv } @@ -389,11 +428,17 @@ func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, loc } node := vars[i] switch node.Class { + case ir.PPARAM, ir.PPARAMOUT: + if !node.IsOutputParamInRegisters() { + if node.FrameOffset() < 0 { + lv.f.Fatalf("Node %v has frameoffset %d\n", node.Sym().Name, node.FrameOffset()) + } + typebits.Set(node.Type(), node.FrameOffset(), args) + break + } + fallthrough // PPARAMOUT in registers acts memory-allocates like an AUTO case ir.PAUTO: typebits.Set(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) - - case ir.PPARAM, ir.PPARAMOUT: - typebits.Set(node.Type(), node.FrameOffset(), args) } } } @@ -798,6 +843,10 @@ func (lv *liveness) epilogue() { live.Or(*live, liveout) } + if lv.doClobber { + lv.clobber(b) + } + // The liveness maps for this block are now complete. Compact them. 
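The partLiveArgs condition added above is easiest to see at the source level. A minimal sketch of a "partially live" argument (hypothetical code, not from this patch): an aggregate parameter wider than one pointer, never address-taken, of which only one part is read.

package main

type pair struct {
	a, b *int
}

// Under a register-based ABI, only the register holding p.a needs to be
// spilled and initialized; the slot for p.b may be left untouched. The
// liveness pass therefore records p in partLiveArgs: p is a PPARAM, its
// address is not taken, and its width exceeds one pointer.
func firstField(p pair) *int {
	return p.a
}

func main() {
	x := 1
	println(firstField(pair{a: &x}))
}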
lv.compact(b) } @@ -851,7 +900,7 @@ func (lv *liveness) compact(b *ssa.Block) { } for _, v := range b.Values { hasStackMap := lv.hasStackMap(v) - isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID)) + isUnsafePoint := lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID)) idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint} if hasStackMap { idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos]) @@ -866,6 +915,169 @@ func (lv *liveness) compact(b *ssa.Block) { lv.livevars = lv.livevars[:0] } +func (lv *liveness) enableClobber() { + // The clobberdead experiment inserts code to clobber pointer slots in all + // the dead variables (locals and args) at every synchronous safepoint. + if !base.Flag.ClobberDead { + return + } + if lv.fn.Pragma&ir.CgoUnsafeArgs != 0 { + // C or assembly code uses the exact frame layout. Don't clobber. + return + } + if len(lv.vars) > 10000 || len(lv.f.Blocks) > 10000 { + // Be careful to avoid doing too much work. + // Bail if >10000 variables or >10000 blocks. + // Otherwise, giant functions make this experiment generate too much code. + return + } + if lv.f.Name == "forkAndExecInChild" { + // forkAndExecInChild calls vfork on some platforms. + // The code we add here clobbers parts of the stack in the child. + // When the parent resumes, it is using the same stack frame. But the + // child has clobbered stack variables that the parent needs. Boom! + // In particular, the sys argument gets clobbered. + return + } + if lv.f.Name == "wbBufFlush" || + ((lv.f.Name == "callReflect" || lv.f.Name == "callMethod") && lv.fn.ABIWrapper()) { + // runtime.wbBufFlush must not modify its arguments. See the comments + // in runtime/mwbbuf.go:wbBufFlush. + // + // reflect.callReflect and reflect.callMethod are called from special + // functions makeFuncStub and methodValueCall. The runtime expects + // that it can find the first argument (ctxt) at 0(SP) in makeFuncStub + // and methodValueCall's frame (see runtime/traceback.go:getArgInfo). + // Normally callReflect and callMethod already do not modify the + // argument, and keep it alive. But the compiler-generated ABI wrappers + // don't do that. Special case the wrappers to not clobber its arguments. + lv.noClobberArgs = true + } + if h := os.Getenv("GOCLOBBERDEADHASH"); h != "" { + // Clobber only functions where the hash of the function name matches a pattern. + // Useful for binary searching for a miscompiled function. + hstr := "" + for _, b := range sha1.Sum([]byte(lv.f.Name)) { + hstr += fmt.Sprintf("%08b", b) + } + if !strings.HasSuffix(hstr, h) { + return + } + fmt.Printf("\t\t\tCLOBBERDEAD %s\n", lv.f.Name) + } + lv.doClobber = true +} + +// Inserts code to clobber pointer slots in all the dead variables (locals and args) +// at every synchronous safepoint in b. +func (lv *liveness) clobber(b *ssa.Block) { + // Copy block's values to a temporary. + oldSched := append([]*ssa.Value{}, b.Values...) + b.Values = b.Values[:0] + idx := 0 + + // Clobber pointer slots in all dead variables at entry. + if b == lv.f.Entry { + for len(oldSched) > 0 && len(oldSched[0].Args) == 0 { + // Skip argless ops. We need to skip at least + // the lowered ClosurePtr op, because it + // really wants to be first. This will also + // skip ops like InitMem and SP, which are ok. + b.Values = append(b.Values, oldSched[0]) + oldSched = oldSched[1:] + } + clobber(lv, b, lv.livevars[0]) + idx++ + } + + // Copy values into schedule, adding clobbering around safepoints. 
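The GOCLOBBERDEADHASH check in enableClobber above can be reproduced on its own. A self-contained sketch of the same selection scheme (the helper name is mine): the SHA-1 of the function name is expanded to a bit string, and a function is clobbered only when that string ends with the requested pattern, so each extra pattern bit halves the selected set. That is what makes a binary search for a miscompiled function practical.

package main

import (
	"crypto/sha1"
	"fmt"
	"strings"
)

// matchesHash mirrors the suffix test in enableClobber: expand each
// byte of SHA-1(fnName) to eight binary digits, then check the suffix.
func matchesHash(fnName, pattern string) bool {
	hstr := ""
	for _, b := range sha1.Sum([]byte(fnName)) {
		hstr += fmt.Sprintf("%08b", b)
	}
	return strings.HasSuffix(hstr, pattern)
}

func main() {
	// Each extra pattern bit halves the set of matching functions:
	// "0" selects about half of them, "01" about a quarter, and so on.
	fmt.Println(matchesHash("main.main", "01"))
}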
+ for _, v := range oldSched { + if !lv.hasStackMap(v) { + b.Values = append(b.Values, v) + continue + } + clobber(lv, b, lv.livevars[idx]) + b.Values = append(b.Values, v) + idx++ + } +} + +// clobber generates code to clobber pointer slots in all dead variables +// (those not marked in live). Clobbering instructions are added to the end +// of b.Values. +func clobber(lv *liveness, b *ssa.Block, live bitvec.BitVec) { + for i, n := range lv.vars { + if !live.Get(int32(i)) && !n.Addrtaken() && !n.OpenDeferSlot() && !n.IsOutputParamHeapAddr() { + // Don't clobber stack objects (address-taken). They are + // tracked dynamically. + // Also don't clobber slots that are live for defers (see + // the code setting livedefer in epilogue). + if lv.noClobberArgs && n.Class == ir.PPARAM { + continue + } + clobberVar(b, n) + } + } +} + +// clobberVar generates code to trash the pointers in v. +// Clobbering instructions are added to the end of b.Values. +func clobberVar(b *ssa.Block, v *ir.Name) { + clobberWalk(b, v, 0, v.Type()) +} + +// b = block to which we append instructions +// v = variable +// offset = offset of (sub-portion of) variable to clobber (in bytes) +// t = type of sub-portion of v. +func clobberWalk(b *ssa.Block, v *ir.Name, offset int64, t *types.Type) { + if !t.HasPointers() { + return + } + switch t.Kind() { + case types.TPTR, + types.TUNSAFEPTR, + types.TFUNC, + types.TCHAN, + types.TMAP: + clobberPtr(b, v, offset) + + case types.TSTRING: + // struct { byte *str; int len; } + clobberPtr(b, v, offset) + + case types.TINTER: + // struct { Itab *tab; void *data; } + // or, when isnilinter(t)==true: + // struct { Type *type; void *data; } + clobberPtr(b, v, offset) + clobberPtr(b, v, offset+int64(types.PtrSize)) + + case types.TSLICE: + // struct { byte *array; int len; int cap; } + clobberPtr(b, v, offset) + + case types.TARRAY: + for i := int64(0); i < t.NumElem(); i++ { + clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem()) + } + + case types.TSTRUCT: + for _, t1 := range t.Fields().Slice() { + clobberWalk(b, v, offset+t1.Offset, t1.Type) + } + + default: + base.Fatalf("clobberWalk: unexpected type, %v", t) + } +} + +// clobberPtr generates a clobber of the pointer at offset offset in v. +// The clobber instruction is added at the end of b. +func clobberPtr(b *ssa.Block, v *ir.Name, offset int64) { + b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v) +} + func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) { if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") { return @@ -1067,8 +1279,10 @@ func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) { for _, n := range lv.vars { switch n.Class { case ir.PPARAM, ir.PPARAMOUT: - if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() { - maxArgNode = n + if !n.IsOutputParamInRegisters() { + if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() { + maxArgNode = n + } } } } @@ -1126,8 +1340,9 @@ func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) { // Entry pointer for Compute analysis. Solves for the Compute of // pointer variables in the function and emits a runtime data // structure read by the garbage collector. -// Returns a map from GC safe points to their corresponding stack map index. -func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map { +// Returns a map from GC safe points to their corresponding stack map index, +// and a map that contains all input parameters that may be partially live. 
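clobberWalk above recurses over a type's layout and emits a clobber for each pointer-sized word that can hold a pointer. A simplified analogue of the same case analysis (my sketch: it uses reflect instead of the compiler's *types.Type and collects offsets instead of emitting ssa.OpClobber values):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

const ptrSize = unsafe.Sizeof(uintptr(0))

// ptrOffsets appends the byte offsets within a value of type t that can
// hold pointers, following the same cases as clobberWalk.
func ptrOffsets(t reflect.Type, base uintptr, out *[]uintptr) {
	switch t.Kind() {
	case reflect.Ptr, reflect.UnsafePointer, reflect.Func, reflect.Chan, reflect.Map:
		*out = append(*out, base)
	case reflect.String: // struct { byte *str; int len }
		*out = append(*out, base)
	case reflect.Interface: // two pointer words: tab/type and data
		*out = append(*out, base, base+ptrSize)
	case reflect.Slice: // struct { byte *array; int len; int cap }
		*out = append(*out, base)
	case reflect.Array:
		for i := 0; i < t.Len(); i++ {
			ptrOffsets(t.Elem(), base+uintptr(i)*t.Elem().Size(), out)
		}
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			ptrOffsets(f.Type, base+f.Offset, out)
		}
	}
}

func main() {
	type T struct {
		s string
		n int
		p *int
	}
	var offs []uintptr
	ptrOffsets(reflect.TypeOf(T{}), 0, &offs)
	fmt.Println(offs) // [0 24] on a 64-bit platform
}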
+func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) (Map, map[*ir.Name]bool) { // Construct the global liveness state. vars, idx := getvariables(curfn) lv := newliveness(curfn, f, vars, idx, stkptrsize) @@ -1189,7 +1404,7 @@ func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map p.To.Sym = x } - return lv.livenessMap + return lv.livenessMap, lv.partLiveArgs } func (lv *liveness) emitStackObjects() *obj.LSym { @@ -1216,8 +1431,26 @@ func (lv *liveness) emitStackObjects() *obj.LSym { // Note: arguments and return values have non-negative Xoffset, // in which case the offset is relative to argp. // Locals have a negative Xoffset, in which case the offset is relative to varp. - off = objw.Uintptr(x, off, uint64(v.FrameOffset())) - off = objw.SymPtr(x, off, reflectdata.TypeLinksym(v.Type()), 0) + // We already limit the frame size, so the offset and the object size + // should not be too big. + frameOffset := v.FrameOffset() + if frameOffset != int64(int32(frameOffset)) { + base.Fatalf("frame offset too big: %v %d", v, frameOffset) + } + off = objw.Uint32(x, off, uint32(frameOffset)) + + t := v.Type() + sz := t.Width + if sz != int64(int32(sz)) { + base.Fatalf("stack object too big: %v of type %v, size %d", v, t, sz) + } + lsym, useGCProg, ptrdata := reflectdata.GCSym(t) + if useGCProg { + ptrdata = -ptrdata + } + off = objw.Uint32(x, off, uint32(sz)) + off = objw.Uint32(x, off, uint32(ptrdata)) + off = objw.SymPtr(x, off, lsym, 0) } if base.Flag.Live != 0 { @@ -1266,31 +1499,35 @@ func isfat(t *types.Type) bool { return false } -func WriteFuncMap(fn *ir.Func) { +// WriteFuncMap writes the pointer bitmaps for bodyless function fn's +// inputs and outputs as the value of symbol .args_stackmap. +// If fn has outputs, two bitmaps are written, otherwise just one. +func WriteFuncMap(fn *ir.Func, abiInfo *abi.ABIParamResultInfo) { if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" { return } - types.CalcSize(fn.Type()) - lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") - nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize)) + nptr := int(abiInfo.ArgWidth() / int64(types.PtrSize)) bv := bitvec.New(int32(nptr) * 2) + + for _, p := range abiInfo.InParams() { + typebits.Set(p.Type, p.FrameOffset(abiInfo), bv) + } + nbitmap := 1 if fn.Type().NumResults() > 0 { nbitmap = 2 } + lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") off := objw.Uint32(lsym, 0, uint32(nbitmap)) off = objw.Uint32(lsym, off, uint32(bv.N)) - - if ir.IsMethod(fn) { - typebits.Set(fn.Type().Recvs(), 0, bv) - } - if fn.Type().NumParams() > 0 { - typebits.Set(fn.Type().Params(), 0, bv) - } off = objw.BitVec(lsym, off, bv) if fn.Type().NumResults() > 0 { - typebits.Set(fn.Type().Results(), 0, bv) + for _, p := range abiInfo.OutParams() { + if len(p.Registers) == 0 { + typebits.Set(p.Type, p.FrameOffset(abiInfo), bv) + } + } off = objw.BitVec(lsym, off, bv) } diff --git a/src/cmd/compile/internal/logopt/escape.go b/src/cmd/compile/internal/logopt/escape.go index 802f967aa66..9660e938b4a 100644 --- a/src/cmd/compile/internal/logopt/escape.go +++ b/src/cmd/compile/internal/logopt/escape.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
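The emitStackObjects hunk above shrinks each stack-object record: frame offset, size, and ptrdata are now emitted as uint32 values, with a negated ptrdata marking types whose pointer layout is described by a GC program. A rough sketch of the resulting record shape; the field names here are illustrative only, and the authoritative definition lives in the runtime:

package main

import "fmt"

// Illustrative layout, not the runtime's exact declaration.
type stackObjectRecord struct {
	off     int32 // offset in frame: >= 0 relative to argp, < 0 relative to varp
	size    int32 // object size in bytes
	ptrdata int32 // bytes of pointer prefix; negative means gcdata is a GC program
	// ...followed by a reference to the gcdata symbol for the type
}

func (r stackObjectRecord) useGCProg() bool { return r.ptrdata < 0 }

func main() {
	r := stackObjectRecord{off: -16, size: 24, ptrdata: -8}
	fmt.Println(r.useGCProg()) // true: pointer layout comes from a GC program
}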
+//go:build go1.8 // +build go1.8 package logopt diff --git a/src/cmd/compile/internal/logopt/escape_bootstrap.go b/src/cmd/compile/internal/logopt/escape_bootstrap.go index 66ff0b8f220..cc04eaadfd1 100644 --- a/src/cmd/compile/internal/logopt/escape_bootstrap.go +++ b/src/cmd/compile/internal/logopt/escape_bootstrap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.8 // +build !go1.8 package logopt diff --git a/src/cmd/compile/internal/logopt/log_opts.go b/src/cmd/compile/internal/logopt/log_opts.go index 37a049d6403..97ebf569448 100644 --- a/src/cmd/compile/internal/logopt/log_opts.go +++ b/src/cmd/compile/internal/logopt/log_opts.go @@ -6,10 +6,10 @@ package logopt import ( "cmd/internal/obj" - "cmd/internal/objabi" "cmd/internal/src" "encoding/json" "fmt" + "internal/buildcfg" "io" "log" "net/url" @@ -408,7 +408,7 @@ func uprootedPath(filename string) string { if !strings.HasPrefix(filename, "$GOROOT/") { return filename } - return objabi.GOROOT + filename[len("$GOROOT"):] + return buildcfg.GOROOT + filename[len("$GOROOT"):] } // FlushLoggedOpts flushes all the accumulated optimization log entries. @@ -448,7 +448,7 @@ func FlushLoggedOpts(ctxt *obj.Link, slashPkgPath string) { currentFile = p0f w = writerForLSP(subdirpath, currentFile) encoder = json.NewEncoder(w) - encoder.Encode(VersionHeader{Version: 0, Package: slashPkgPath, Goos: objabi.GOOS, Goarch: objabi.GOARCH, GcVersion: objabi.Version, File: currentFile}) + encoder.Encode(VersionHeader{Version: 0, Package: slashPkgPath, Goos: buildcfg.GOOS, Goarch: buildcfg.GOARCH, GcVersion: buildcfg.Version, File: currentFile}) } // The first "target" is the most important one. diff --git a/src/cmd/compile/internal/mips/galign.go b/src/cmd/compile/internal/mips/galign.go index 599163550bb..f892923ba03 100644 --- a/src/cmd/compile/internal/mips/galign.go +++ b/src/cmd/compile/internal/mips/galign.go @@ -8,17 +8,17 @@ import ( "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" "cmd/internal/obj/mips" - "cmd/internal/objabi" + "internal/buildcfg" ) func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &mips.Linkmips - if objabi.GOARCH == "mipsle" { + if buildcfg.GOARCH == "mipsle" { arch.LinkArch = &mips.Linkmipsle } arch.REGSP = mips.REGSP arch.MAXWIDTH = (1 << 31) - 1 - arch.SoftFloat = (objabi.GOMIPS == "softfloat") + arch.SoftFloat = (buildcfg.GOMIPS == "softfloat") arch.ZeroRange = zerorange arch.Ginsnop = ginsnop arch.Ginsnopdefer = ginsnop diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index f1cdbd3241a..e0447f38cbf 100644 --- a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -112,9 +112,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Reg = y } case ssa.OpMIPSMOVWnop: - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -244,9 +241,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpMIPSCMOVZ: - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() @@ -254,9 +248,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpMIPSCMOVZzero: - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and 
output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[1].Reg() @@ -372,6 +363,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpMIPSMOVDF, ssa.OpMIPSNEGF, ssa.OpMIPSNEGD, + ssa.OpMIPSSQRTF, ssa.OpMIPSSQRTD, ssa.OpMIPSCLZ: p := s.Prog(v.Op.Asm()) @@ -806,7 +798,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(obj.AGETCALLERPC) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.OpClobber: + case ssa.OpClobber, ssa.OpClobberReg: // TODO: implement for clobberdead experiment. Nop is ok for now. default: v.Fatalf("genValue not implemented: %s", v.LongString()) diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go index fc0a34228c8..af81366e51b 100644 --- a/src/cmd/compile/internal/mips64/galign.go +++ b/src/cmd/compile/internal/mips64/galign.go @@ -8,17 +8,17 @@ import ( "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" "cmd/internal/obj/mips" - "cmd/internal/objabi" + "internal/buildcfg" ) func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &mips.Linkmips64 - if objabi.GOARCH == "mips64le" { + if buildcfg.GOARCH == "mips64le" { arch.LinkArch = &mips.Linkmips64le } arch.REGSP = mips.REGSP arch.MAXWIDTH = 1 << 50 - arch.SoftFloat = objabi.GOMIPS64 == "softfloat" + arch.SoftFloat = buildcfg.GOMIPS64 == "softfloat" arch.ZeroRange = zerorange arch.Ginsnop = ginsnop arch.Ginsnopdefer = ginsnop diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index 14cf7af1437..e821a00876f 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -115,9 +115,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Reg = y } case ssa.OpMIPS64MOVVnop: - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -358,6 +355,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpMIPS64MOVDF, ssa.OpMIPS64NEGF, ssa.OpMIPS64NEGD, + ssa.OpMIPS64SQRTF, ssa.OpMIPS64SQRTD: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG @@ -767,7 +765,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(obj.AGETCALLERPC) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.OpClobber: + case ssa.OpClobber, ssa.OpClobberReg: // TODO: implement for clobberdead experiment. Nop is ok for now. default: v.Fatalf("genValue not implemented: %s", v.LongString()) diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go index a1596be4a42..4ca2eb4740a 100644 --- a/src/cmd/compile/internal/noder/decl.go +++ b/src/cmd/compile/internal/noder/decl.go @@ -134,14 +134,14 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) { } // We need to use g.typeExpr(decl.Type) here to ensure that for - // chained, defined-type declarations like + // chained, defined-type declarations like: // // type T U // // //go:notinheap // type U struct { … } // - // that we mark both T and U as NotInHeap. If we instead used just + // we mark both T and U as NotInHeap. If we instead used just // g.typ(otyp.Underlying()), then we'd instead set T's underlying // type directly to the struct type (which is not marked NotInHeap) // and fail to mark T as NotInHeap. @@ -154,6 +154,12 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) { // [mdempsky: Subtleties like these are why I always vehemently // object to new type pragmas.] 
ntyp.SetUnderlying(g.typeExpr(decl.Type)) + if len(decl.TParamList) > 0 { + // Set HasTParam if there are any tparams, even if no tparams are + // used in the type itself (e.g., if it is an empty struct, or no + // fields in the struct use the tparam). + ntyp.SetHasTParam(true) + } types.ResumeCheckSize() if otyp, ok := otyp.(*types2.Named); ok && otyp.NumMethods() != 0 { @@ -205,11 +211,24 @@ func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) { } else if ir.CurFunc == nil { name.Defn = as } - out.Append(typecheck.Stmt(as)) + lhs := []ir.Node{as.X} + rhs := []ir.Node{} + if as.Y != nil { + rhs = []ir.Node{as.Y} + } + transformAssign(as, lhs, rhs) + as.X = lhs[0] + if as.Y != nil { + as.Y = rhs[0] + } + as.SetTypecheck(1) + out.Append(as) } } if as2 != nil { - out.Append(typecheck.Stmt(as2)) + transformAssign(as2, as2.Lhs, as2.Rhs) + as2.SetTypecheck(1) + out.Append(as2) } } diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go index b166d34eadc..c7695ed9204 100644 --- a/src/cmd/compile/internal/noder/expr.go +++ b/src/cmd/compile/internal/noder/expr.go @@ -15,7 +15,6 @@ import ( ) func (g *irgen) expr(expr syntax.Expr) ir.Node { - // TODO(mdempsky): Change callers to not call on nil? if expr == nil { return nil } @@ -30,7 +29,14 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node { } switch { case tv.IsBuiltin(): - // TODO(mdempsky): Handle in CallExpr? + // Qualified builtins, such as unsafe.Add and unsafe.Slice. + if expr, ok := expr.(*syntax.SelectorExpr); ok { + if name, ok := expr.X.(*syntax.Name); ok { + if _, ok := g.info.Uses[name].(*types2.PkgName); ok { + return g.use(expr.Sel) + } + } + } return g.use(expr.(*syntax.Name)) case tv.IsType(): return ir.TypeNode(g.typ(tv.Type)) @@ -65,7 +71,7 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node { } n := g.expr0(typ, expr) - if n.Typecheck() != 1 { + if n.Typecheck() != 1 && n.Typecheck() != 3 { base.FatalfAt(g.pos(expr), "missed typecheck: %+v", n) } if !g.match(n.Type(), typ, tv.HasOk()) { @@ -82,8 +88,7 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node { if _, isNil := g.info.Uses[expr].(*types2.Nil); isNil { return Nil(pos, g.typ(typ)) } - // TODO(mdempsky): Remove dependency on typecheck.Expr. - return typecheck.Expr(g.use(expr)) + return g.use(expr) case *syntax.CompositeLit: return g.compLit(typ, expr) @@ -96,7 +101,13 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node { case *syntax.CallExpr: fun := g.expr(expr.Fun) + + // The key for the Inferred map is the CallExpr (if inferring + // types required the function arguments) or the IndexExpr below + // (if types could be inferred without the function arguments). if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 { + // This is the case where inferring types required the + // types of the function arguments. targs := make([]ir.Node, len(inferred.Targs)) for i, targ := range inferred.Targs { targs[i] = ir.TypeNode(g.typ(targ)) @@ -119,23 +130,33 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node { case *syntax.IndexExpr: var targs []ir.Node - if _, ok := expr.Index.(*syntax.ListExpr); ok { + + if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 { + // This is the partial type inference case where the types + // can be inferred from other type arguments without using + // the types of the function arguments. 
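The two Inferred-map cases distinguished above map onto two source forms. A hypothetical example (written with current type-parameter syntax for readability; the dev.typeparams constraint syntax differs):

package main

import "fmt"

// E can be derived from S alone via constraint type inference.
func first[S ~[]E, E any](s S) E {
	return s[0]
}

func main() {
	xs := []int{7, 8}

	// Both type arguments are inferred from the ordinary argument xs,
	// so the types2 Inferred entry is keyed by the CallExpr.
	fmt.Println(first(xs))

	// S is given explicitly and E follows from S without looking at
	// any function arguments; the Inferred entry for this partial
	// instantiation is keyed by the IndexExpr.
	f := first[[]int]
	fmt.Println(f(xs))
}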
+ targs = make([]ir.Node, len(inferred.Targs)) + for i, targ := range inferred.Targs { + targs[i] = ir.TypeNode(g.typ(targ)) + } + } else if _, ok := expr.Index.(*syntax.ListExpr); ok { targs = g.exprList(expr.Index) } else { index := g.expr(expr.Index) if index.Op() != ir.OTYPE { // This is just a normal index expression - return Index(pos, g.expr(expr.X), index) + return Index(pos, g.typ(typ), g.expr(expr.X), index) } // This is generic function instantiation with a single type targs = []ir.Node{index} } - // This is a generic function instantiation (e.g. min[int]) + // This is a generic function instantiation (e.g. min[int]). + // Generic type instantiation is handled in the type + // section of expr() above (using g.typ). x := g.expr(expr.X) if x.Op() != ir.ONAME || x.Type().Kind() != types.TFUNC { panic("Incorrect argument for generic func instantiation") } - // This could also be an OTYPEINST once we can handle those examples. n := ir.NewInstExpr(pos, ir.OFUNCINST, x, targs) typed(g.typ(typ), n) return n @@ -147,24 +168,23 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node { // Qualified identifier. if name, ok := expr.X.(*syntax.Name); ok { if _, ok := g.info.Uses[name].(*types2.PkgName); ok { - // TODO(mdempsky): Remove dependency on typecheck.Expr. - return typecheck.Expr(g.use(expr.Sel)) + return g.use(expr.Sel) } } return g.selectorExpr(pos, typ, expr) case *syntax.SliceExpr: - return Slice(pos, g.expr(expr.X), g.expr(expr.Index[0]), g.expr(expr.Index[1]), g.expr(expr.Index[2])) + return Slice(pos, g.typ(typ), g.expr(expr.X), g.expr(expr.Index[0]), g.expr(expr.Index[1]), g.expr(expr.Index[2])) case *syntax.Operation: if expr.Y == nil { - return Unary(pos, g.op(expr.Op, unOps[:]), g.expr(expr.X)) + return Unary(pos, g.typ(typ), g.op(expr.Op, unOps[:]), g.expr(expr.X)) } switch op := g.op(expr.Op, binOps[:]); op { case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: return Compare(pos, g.typ(typ), op, g.expr(expr.X), g.expr(expr.Y)) default: - return Binary(pos, op, g.expr(expr.X), g.expr(expr.Y)) + return Binary(pos, op, g.typ(typ), g.expr(expr.X), g.expr(expr.Y)) } default: @@ -178,7 +198,7 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node { // than in typecheck.go. func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.SelectorExpr) ir.Node { x := g.expr(expr.X) - if x.Type().Kind() == types.TTYPEPARAM { + if x.Type().HasTParam() { // Leave a method call on a type param as an OXDOT, since it can // only be fully transformed once it has an instantiated type. 
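Why the OXDOT has to survive until stenciling is clearer with a concrete, hypothetical generic function: the kind of selection cannot be decided while the receiver type is still a type parameter.

package main

import "fmt"

type stringer interface {
	String() string
}

// In the generic body, v.String() stays an OXDOT: depending on the
// concrete T chosen at instantiation, it may resolve to an interface
// call, a direct method call, or a promoted method.
func describe[T stringer](v T) string {
	return "value: " + v.String()
}

type name string

func (n name) String() string { return string(n) }

func main() {
	fmt.Println(describe(name("go")))
}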
n := ir.NewSelectorExpr(pos, ir.OXDOT, x, typecheck.Lookup(expr.Sel.Value)) @@ -230,7 +250,7 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto if havePtr != wantPtr { if havePtr { - x = Implicit(Deref(pos, x)) + x = Implicit(Deref(pos, x.Type().Elem(), x)) } else { x = Implicit(Addr(pos, x)) } @@ -253,7 +273,7 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto // selinfo.Targs() are the types used to // instantiate the type of receiver - targs2 := selinfo.TArgs() + targs2 := getTargs(selinfo) targs := make([]ir.Node, len(targs2)) for i, targ2 := range targs2 { targs[i] = ir.TypeNode(g.typ(targ2)) @@ -279,6 +299,19 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto return n } +// getTargs gets the targs associated with the receiver of a selected method +func getTargs(selinfo *types2.Selection) []types2.Type { + r := selinfo.Recv() + if p := types2.AsPointer(r); p != nil { + r = p.Elem() + } + n := types2.AsNamed(r) + if n == nil { + base.Fatalf("Incorrect type for selinfo %v", selinfo) + } + return n.TArgs() +} + func (g *irgen) exprList(expr syntax.Expr) []ir.Node { switch expr := expr.(type) { case nil: @@ -321,23 +354,25 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node { } } - // TODO(mdempsky): Remove dependency on typecheck.Expr. - return typecheck.Expr(ir.NewCompLitExpr(g.pos(lit), ir.OCOMPLIT, ir.TypeNode(g.typ(typ)), exprs)) + n := ir.NewCompLitExpr(g.pos(lit), ir.OCOMPLIT, nil, exprs) + typed(g.typ(typ), n) + return transformCompLit(n) } -func (g *irgen) funcLit(typ types2.Type, expr *syntax.FuncLit) ir.Node { +func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node { fn := ir.NewFunc(g.pos(expr)) fn.SetIsHiddenClosure(ir.CurFunc != nil) fn.Nname = ir.NewNameAt(g.pos(expr), typecheck.ClosureName(ir.CurFunc)) ir.MarkFunc(fn.Nname) - fn.Nname.SetType(g.typ(typ)) + typ := g.typ(typ2) fn.Nname.Func = fn fn.Nname.Defn = fn + typed(typ, fn.Nname) + fn.SetTypecheck(1) fn.OClosure = ir.NewClosureExpr(g.pos(expr), fn) - fn.OClosure.SetType(fn.Nname.Type()) - fn.OClosure.SetTypecheck(1) + typed(typ, fn.OClosure) g.funcBody(fn, nil, expr.Type, expr.Body) diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go index 4cb6bc3eabc..9da0e493007 100644 --- a/src/cmd/compile/internal/noder/helpers.go +++ b/src/cmd/compile/internal/noder/helpers.go @@ -18,10 +18,6 @@ import ( // // TODO(mdempsky): Move into their own package so they can be easily // reused by iimport and frontend optimizations. -// -// TODO(mdempsky): Update to consistently return already typechecked -// results, rather than leaving the caller responsible for using -// typecheck.Expr or typecheck.Stmt. type ImplicitNode interface { ir.Node @@ -54,8 +50,11 @@ func Nil(pos src.XPos, typ *types.Type) ir.Node { // Expressions func Addr(pos src.XPos, x ir.Node) *ir.AddrExpr { - // TODO(mdempsky): Avoid typecheck.Expr. Probably just need to set OPTRLIT when appropriate. 
- n := typecheck.Expr(typecheck.NodAddrAt(pos, x)).(*ir.AddrExpr) + n := typecheck.NodAddrAt(pos, x) + switch x.Op() { + case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT: + n.SetOp(ir.OPTRLIT) + } typed(types.NewPtr(x.Type()), n) return n } @@ -64,44 +63,76 @@ func Assert(pos src.XPos, x ir.Node, typ *types.Type) ir.Node { return typed(typ, ir.NewTypeAssertExpr(pos, x, nil)) } -func Binary(pos src.XPos, op ir.Op, x, y ir.Node) ir.Node { +func Binary(pos src.XPos, op ir.Op, typ *types.Type, x, y ir.Node) ir.Node { switch op { case ir.OANDAND, ir.OOROR: return typed(x.Type(), ir.NewLogicalExpr(pos, op, x, y)) case ir.OADD: - if x.Type().IsString() { - // TODO(mdempsky): Construct OADDSTR directly. - return typecheck.Expr(ir.NewBinaryExpr(pos, op, x, y)) + n := ir.NewBinaryExpr(pos, op, x, y) + if x.Type().HasTParam() || y.Type().HasTParam() { + // Delay transformAdd() if either arg has a type param, + // since it needs to know the exact types to decide whether + // to transform OADD to OADDSTR. + n.SetType(typ) + n.SetTypecheck(3) + return n } - fallthrough + typed(typ, n) + return transformAdd(n) default: return typed(x.Type(), ir.NewBinaryExpr(pos, op, x, y)) } } func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool) ir.Node { - // TODO(mdempsky): This should not be so difficult. + n := ir.NewCallExpr(pos, ir.OCALL, fun, args) + n.IsDDD = dots + // n.Use will be changed to ir.CallUseStmt in g.stmt() if this call is + // just a statement (any return values are ignored). + n.Use = ir.CallUseExpr + if fun.Op() == ir.OTYPE { // Actually a type conversion, not a function call. - n := ir.NewCallExpr(pos, ir.OCALL, fun, args) - if fun.Type().Kind() == types.TTYPEPARAM { + if fun.Type().HasTParam() || args[0].Type().HasTParam() { // For type params, don't typecheck until we actually know // the type. return typed(typ, n) } - return typecheck.Expr(n) + typed(typ, n) + return transformConvCall(n) } if fun, ok := fun.(*ir.Name); ok && fun.BuiltinOp != 0 { - // Call to a builtin function. - n := ir.NewCallExpr(pos, ir.OCALL, fun, args) - n.IsDDD = dots + // For builtin ops, we currently keep using the old + // typechecker to transform the call to a more specific expression + // and possibly use more specific ops. However, for a bunch of the + // ops, we delay running the old typechecker if any of the args have + // type params, for a variety of reasons: + // + // OMAKE: hard to choose specific ops OMAKESLICE, etc. until arg type is known + // OREAL/OIMAG: can't determine type float32/float64 until arg type is known + // OLEN/OCAP: old typechecker will complain if arg is not obviously a slice/array. + // OAPPEND: old typechecker will complain if arg is not obviously slice, etc. + // + // We will eventually break out the transforming functionality + // needed for builtins, and call it here or during stenciling, as + // appropriate. switch fun.BuiltinOp { - case ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN: - return typecheck.Stmt(n) - default: - return typecheck.Expr(n) + case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OLEN, ir.OCAP, ir.OAPPEND: + hasTParam := false + for _, arg := range args { + if arg.Type().HasTParam() { + hasTParam = true + break + } + } + if hasTParam { + return typed(typ, n) + } } + + typed(typ, n) + return transformBuiltin(n) } // Add information, now that we know that fun is actually being called.
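The Typecheck() == 3 marker used in Binary above parks an expression whose final op depends on the instantiated type. A hypothetical generic function (current constraint syntax) makes the OADD/OADDSTR choice concrete:

package main

import "fmt"

// With T == string the + below must be lowered to OADDSTR; with
// T == int it stays OADD. Neither can be decided in the generic body,
// so Binary parks the node with Typecheck() == 3 until stenciling
// substitutes a concrete T.
func sum[T int | string](a, b T) T {
	return a + b
}

func main() {
	fmt.Println(sum(1, 2), sum("a", "b"))
}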
@@ -121,11 +152,17 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool) } } - n := ir.NewCallExpr(pos, ir.OCALL, fun, args) - n.IsDDD = dots + if fun.Type().HasTParam() { + // If the fun arg is or has a type param, don't do any extra + // transformations, since we may not have needed properties yet + // (e.g. number of return values, etc). The type param is probably + // described by a structural constraint that requires it to be a + // certain function type, etc., but we don't want to analyze that. + return typed(typ, n) + } if fun.Op() == ir.OXDOT { - if fun.(*ir.SelectorExpr).X.Type().Kind() != types.TTYPEPARAM { + if !fun.(*ir.SelectorExpr).X.Type().HasTParam() { base.FatalfAt(pos, "Expecting type param receiver in %v", fun) } // For methods called in a generic function, don't do any extra @@ -135,46 +172,35 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool) return n } if fun.Op() != ir.OFUNCINST { - // If no type params, still do normal typechecking, since we're - // still missing some things done by tcCall below (mainly - // typecheckargs and typecheckaste). - typecheck.Call(n) + // If no type params, do the normal call transformations. This + // will convert OCALL to OCALLFUNC. + typed(typ, n) + transformCall(n) return n } - n.Use = ir.CallUseExpr - if fun.Type().NumResults() == 0 { - n.Use = ir.CallUseStmt - } - - // Rewrite call node depending on use. - switch fun.Op() { - case ir.ODOTINTER: - n.SetOp(ir.OCALLINTER) - - case ir.ODOTMETH: - n.SetOp(ir.OCALLMETH) - - default: - n.SetOp(ir.OCALLFUNC) - } - + // Leave the op as OCALL, which indicates the call still needs typechecking. typed(typ, n) return n } func Compare(pos src.XPos, typ *types.Type, op ir.Op, x, y ir.Node) ir.Node { n := ir.NewBinaryExpr(pos, op, x, y) - if !types.Identical(x.Type(), y.Type()) { - // TODO(mdempsky): Handle subtleties of constructing mixed-typed comparisons. - n = typecheck.Expr(n).(*ir.BinaryExpr) + if x.Type().HasTParam() || y.Type().HasTParam() { + // Delay transformCompare() if either arg has a type param, since + // it needs to know the exact types to decide on any needed conversions. + n.SetType(typ) + n.SetTypecheck(3) + return n } - return typed(typ, n) + typed(typ, n) + transformCompare(n) + return n } -func Deref(pos src.XPos, x ir.Node) *ir.StarExpr { +func Deref(pos src.XPos, typ *types.Type, x ir.Node) *ir.StarExpr { n := ir.NewStarExpr(pos, x) - typed(x.Type().Elem(), n) + typed(typ, n) return n } @@ -233,36 +259,58 @@ func dot(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node, selection *types.Fi // TODO(mdempsky): Move to package types. func method(typ *types.Type, index int) *types.Field { if typ.IsInterface() { - return typ.Field(index) + return typ.AllMethods().Index(index) } return types.ReceiverBaseType(typ).Methods().Index(index) } -func Index(pos src.XPos, x, index ir.Node) ir.Node { - // TODO(mdempsky): Avoid typecheck.Expr (which will call tcIndex) - return typecheck.Expr(ir.NewIndexExpr(pos, x, index)) +func Index(pos src.XPos, typ *types.Type, x, index ir.Node) ir.Node { + n := ir.NewIndexExpr(pos, x, index) + if x.Type().HasTParam() { + // transformIndex needs to know exact type + n.SetType(typ) + n.SetTypecheck(3) + return n + } + typed(typ, n) + // transformIndex will modify n.Type() for OINDEXMAP. 
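transformCompare is delayed for the same reason: which operand receives an implicit conversion depends on the exact operand types. An ordinary, non-generic illustration of the kind of conversion it has to insert:

package main

import "fmt"

type myError struct{}

func (*myError) Error() string { return "boom" }

func main() {
	var err error
	var p *myError
	// Mixed interface/concrete comparison: the concrete operand p is
	// implicitly converted to error before comparing. Choosing that
	// conversion is transformCompare's job, so it needs exact types.
	fmt.Println(err == p) // false: nil interface vs non-nil error holding (*myError)(nil)
}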
+ transformIndex(n) + return n } -func Slice(pos src.XPos, x, low, high, max ir.Node) ir.Node { +func Slice(pos src.XPos, typ *types.Type, x, low, high, max ir.Node) ir.Node { op := ir.OSLICE if max != nil { op = ir.OSLICE3 } - // TODO(mdempsky): Avoid typecheck.Expr. - return typecheck.Expr(ir.NewSliceExpr(pos, op, x, low, high, max)) + n := ir.NewSliceExpr(pos, op, x, low, high, max) + if x.Type().HasTParam() { + // transformSlice needs to know if x.Type() is a string or an array or a slice. + n.SetType(typ) + n.SetTypecheck(3) + return n + } + typed(typ, n) + transformSlice(n) + return n } -func Unary(pos src.XPos, op ir.Op, x ir.Node) ir.Node { +func Unary(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node) ir.Node { switch op { case ir.OADDR: return Addr(pos, x) case ir.ODEREF: - return Deref(pos, x) + return Deref(pos, typ, x) } - typ := x.Type() if op == ir.ORECV { - typ = typ.Elem() + if typ.IsFuncArgStruct() && typ.NumFields() == 2 { + // Remove the second boolean type (if provided by type2), + // since that works better with the rest of the compiler + // (which will add it back in later). + assert(typ.Field(1).Type.Kind() == types.TBOOL) + typ = typ.Field(0).Type + } } return typed(typ, ir.NewUnaryExpr(pos, op, x)) } @@ -271,7 +319,7 @@ func Unary(pos src.XPos, op ir.Op, x ir.Node) ir.Node { var one = constant.MakeInt64(1) -func IncDec(pos src.XPos, op ir.Op, x ir.Node) ir.Node { - x = typecheck.AssignExpr(x) +func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt { + assert(x.Type() != nil) return ir.NewAssignOpStmt(pos, op, x, typecheck.DefaultLit(ir.NewBasicLit(pos, one), x.Type())) } diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go index 89a2598833f..701e9001c85 100644 --- a/src/cmd/compile/internal/noder/import.go +++ b/src/cmd/compile/internal/noder/import.go @@ -7,6 +7,7 @@ package noder import ( "errors" "fmt" + "internal/buildcfg" "io" "os" pathpkg "path" @@ -108,7 +109,7 @@ func openPackage(path string) (*os.File, error) { } } - if objabi.GOROOT != "" { + if buildcfg.GOROOT != "" { suffix := "" if base.Flag.InstallSuffix != "" { suffix = "_" + base.Flag.InstallSuffix @@ -118,10 +119,10 @@ func openPackage(path string) (*os.File, error) { suffix = "_msan" } - if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffix, path)); err == nil { + if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.a", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil { return file, nil } - if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffix, path)); err == nil { + if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.o", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil { return file, nil } } @@ -243,9 +244,9 @@ func importfile(decl *syntax.ImportDecl) *types.Pkg { base.Errorf("import %s: not a go object file: %s", file, p) base.ErrorExit() } - q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring()) - if p[10:] != q { - base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q) + q := objabi.HeaderString() + if p != q { + base.Errorf("import %s: object is [%s] expected [%s]", file, p, q) base.ErrorExit() } diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go index 28536cc1f7f..3e0d3285ab9 100644 --- a/src/cmd/compile/internal/noder/irgen.go +++ 
b/src/cmd/compile/internal/noder/irgen.go @@ -36,19 +36,10 @@ func check2(noders []*noder) { // typechecking conf := types2.Config{ GoVersion: base.Flag.Lang, - InferFromConstraints: true, IgnoreLabels: true, // parser already checked via syntax.CheckBranches mode CompilerErrorMessages: true, // use error strings matching existing compiler errors Error: func(err error) { terr := err.(types2.Error) - if len(terr.Msg) > 0 && terr.Msg[0] == '\t' { - // types2 reports error clarifications via separate - // error messages which are indented with a tab. - // Ignore them to satisfy tools and tests that expect - // only one error in such cases. - // TODO(gri) Need to adjust error reporting in types2. - return - } base.ErrorfAt(m.makeXPos(terr.Pos), "%s", terr.Msg) }, Importer: &gcimports{ @@ -68,10 +59,10 @@ func check2(noders []*noder) { } pkg, err := conf.Check(base.Ctxt.Pkgpath, files, &info) files = nil + base.ExitIfErrors() if err != nil { base.FatalfAt(src.NoXPos, "conf.Check error: %v", err) } - base.ExitIfErrors() if base.Flag.G < 2 { os.Exit(0) } @@ -100,6 +91,9 @@ type irgen struct { objs map[types2.Object]*ir.Name typs map[types2.Type]*types.Type marker dwarfgen.ScopeMarker + + // Fully-instantiated generic types whose methods should be instantiated + instTypeList []*types.Type } func (g *irgen) generate(noders []*noder) { diff --git a/src/cmd/compile/internal/noder/lex.go b/src/cmd/compile/internal/noder/lex.go index cdca9e55f33..66a56a50ec8 100644 --- a/src/cmd/compile/internal/noder/lex.go +++ b/src/cmd/compile/internal/noder/lex.go @@ -6,11 +6,11 @@ package noder import ( "fmt" + "internal/buildcfg" "strings" "cmd/compile/internal/ir" "cmd/compile/internal/syntax" - "cmd/internal/objabi" ) func isSpace(c rune) bool { @@ -28,7 +28,7 @@ const ( ir.Nosplit | ir.Noinline | ir.NoCheckPtr | - ir.RegisterParams | // TODO remove after register abi is working + ir.RegisterParams | // TODO(register args) remove after register abi is working ir.CgoUnsafeArgs | ir.UintptrEscapes | ir.Systemstack | @@ -44,7 +44,7 @@ func pragmaFlag(verb string) ir.PragmaFlag { case "go:build": return ir.GoBuildPragma case "go:nointerface": - if objabi.Fieldtrack_enabled != 0 { + if buildcfg.Experiment.FieldTrack { return ir.Nointerface } case "go:noescape": @@ -80,7 +80,7 @@ func pragmaFlag(verb string) ir.PragmaFlag { // in the argument list. // Used in syscall/dll_windows.go. 
return ir.UintptrEscapes - case "go:registerparams": // TODO remove after register abi is working + case "go:registerparams": // TODO(register args) remove after register abi is working return ir.RegisterParams case "go:notinheap": return ir.NotInHeap @@ -110,7 +110,7 @@ func (p *noder) pragcgo(pos syntax.Pos, text string) { case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]): case len(f) == 4 && !isQuoted(f[1]) && !isQuoted(f[2]) && isQuoted(f[3]): f[3] = strings.Trim(f[3], `"`) - if objabi.GOOS == "aix" && f[3] != "" { + if buildcfg.GOOS == "aix" && f[3] != "" { // On Aix, library pattern must be "lib.a/object.o" // or "lib.a/libname.so.X" n := strings.Split(f[3], "/") diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go index d692bf97aaf..4c7c9fc322f 100644 --- a/src/cmd/compile/internal/noder/noder.go +++ b/src/cmd/compile/internal/noder/noder.go @@ -68,6 +68,9 @@ func LoadPackage(filenames []string) { for e := range p.err { p.errorAt(e.Pos, "%s", e.Msg) } + if p.file == nil { + base.ErrorExit() + } lines += p.file.EOF.Line() } base.Timer.AddEvent(int64(lines), "lines") @@ -686,7 +689,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node { if expr.Kind == syntax.RuneLit { n.SetType(types.UntypedRune) } - n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error + n.SetDiag(expr.Bad || n.Val().Kind() == constant.Unknown) // avoid follow-on errors if there was a syntax error return n case *syntax.CompositeLit: n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, p.typeExpr(expr.Type), nil) diff --git a/src/cmd/compile/internal/noder/object.go b/src/cmd/compile/internal/noder/object.go index b4e5c022dbf..82cce1ace0f 100644 --- a/src/cmd/compile/internal/noder/object.go +++ b/src/cmd/compile/internal/noder/object.go @@ -22,16 +22,26 @@ func (g *irgen) def(name *syntax.Name) (*ir.Name, types2.Object) { return g.obj(obj), obj } +// use returns the Name node associated with the use of name. The returned node +// will have the correct type and be marked as typechecked. func (g *irgen) use(name *syntax.Name) *ir.Name { - obj, ok := g.info.Uses[name] + obj2, ok := g.info.Uses[name] if !ok { base.FatalfAt(g.pos(name), "unknown name %v", name) } - return ir.CaptureName(g.pos(obj), ir.CurFunc, g.obj(obj)) + obj := ir.CaptureName(g.pos(obj2), ir.CurFunc, g.obj(obj2)) + if obj.Defn != nil && obj.Defn.Op() == ir.ONAME { + // If CaptureName created a closure variable, then transfer the + // type of the captured name to the new closure variable. + obj.SetTypecheck(1) + obj.SetType(obj.Defn.Type()) + } + return obj } -// obj returns the Name that represents the given object. If no such -// Name exists yet, it will be implicitly created. +// obj returns the Name that represents the given object. If no such Name exists +// yet, it will be implicitly created. The returned node will have the correct +// type and be marked as typechecked. // // For objects declared at function scope, ir.CurFunc must already be // set to the respective function when the Name is created. 
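The closure-variable handling in use above has a simple source-level counterpart (hypothetical code): the n inside the function literal is a distinct captured Name whose Defn points at the outer n, which is why the new code copies the type across.

package main

import "fmt"

func counter() func() int {
	n := 0
	return func() int {
		// This n is a closure variable: ir.CaptureName gives it its
		// own *ir.Name with Defn set to the outer n's Name.
		n++
		return n
	}
}

func main() {
	c := counter()
	fmt.Println(c(), c(), c()) // 1 2 3
}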
@@ -45,6 +55,7 @@ func (g *irgen) obj(obj types2.Object) *ir.Name { } n := typecheck.Resolve(ir.NewIdent(src.NoXPos, sym)) if n, ok := n.(*ir.Name); ok { + n.SetTypecheck(1) return n } base.FatalfAt(g.pos(obj), "failed to resolve %v", obj) @@ -117,6 +128,7 @@ func (g *irgen) obj(obj types2.Object) *ir.Name { } g.objs[obj] = name + name.SetTypecheck(1) return name } @@ -135,9 +147,6 @@ func (g *irgen) objFinish(name *ir.Name, class ir.Class, typ *types.Type) { sym.SetFunc(true) } - // We already know name's type, but typecheck is really eager to try - // recomputing it later. This appears to prevent that at least. - name.Ntype = ir.TypeNode(typ) name.SetTypecheck(1) name.SetWalkdef(1) diff --git a/src/cmd/compile/internal/noder/sizes.go b/src/cmd/compile/internal/noder/sizes.go index 7cda6da9a60..23f20626754 100644 --- a/src/cmd/compile/internal/noder/sizes.go +++ b/src/cmd/compile/internal/noder/sizes.go @@ -115,10 +115,10 @@ func (s *gcSizes) Sizeof(T types2.Type) int64 { } offsets := s.Offsetsof(fields) - // gc: The last field of a struct is not allowed to + // gc: The last field of a non-zero-sized struct is not allowed to // have size 0. last := s.Sizeof(fields[n-1].Type()) - if last == 0 { + if last == 0 && offsets[n-1] > 0 { last = 1 } diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go index 69461a81905..3ebc8dff6d8 100644 --- a/src/cmd/compile/internal/noder/stencil.go +++ b/src/cmd/compile/internal/noder/stencil.go @@ -18,48 +18,75 @@ import ( "strings" ) -// stencil scans functions for instantiated generic function calls and -// creates the required stencils for simple generic functions. +// For catching problems as we add more features +// TODO(danscales): remove assertions or replace with base.FatalfAt() +func assert(p bool) { + if !p { + panic("assertion failed") + } +} + +// stencil scans functions for instantiated generic function calls and creates the +// required instantiations for simple generic functions. It also creates +// instantiated methods for all fully-instantiated generic types that have been +// encountered already or new ones that are encountered during the stenciling +// process. func (g *irgen) stencil() { g.target.Stencils = make(map[*types.Sym]*ir.Func) + + // Instantiate the methods of instantiated generic types that we have seen so far. + g.instantiateMethods() + // Don't use range(g.target.Decls) - we also want to process any new instantiated // functions that are created during this loop, in order to handle generic // functions calling other generic functions. for i := 0; i < len(g.target.Decls); i++ { decl := g.target.Decls[i] - if decl.Op() != ir.ODCLFUNC || decl.Type().NumTParams() > 0 { - // Skip any non-function declarations and skip generic functions + + // Look for function instantiations in bodies of non-generic + // functions or in global assignments (ignore global type and + // constant declarations). + switch decl.Op() { + case ir.ODCLFUNC: + if decl.Type().HasTParam() { + // Skip any generic functions + continue + } + // transformCall() below depends on CurFunc being set. + ir.CurFunc = decl.(*ir.Func) + + case ir.OAS, ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV, ir.OASOP: + // These are all the various kinds of global assignments, + // whose right-hand-sides might contain a function + // instantiation. + + default: + // The other possible ops at the top level are ODCLCONST + // and ODCLTYPE, which don't have any function + // instantiations. 
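The sizes.go change above aligns types2's gcSizes with gc's layout rule: a zero-size final field is padded so that a pointer to it cannot point just past the object, except when the whole struct is itself zero-sized. For illustration (sizes assume a 64-bit platform):

package main

import (
	"fmt"
	"unsafe"
)

type padded struct {
	x int64
	y struct{} // offset 8 > 0: padded so &p.y stays inside the object
}

type empty struct {
	y struct{} // offset 0: no padding, the struct stays zero-sized
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{}), unsafe.Sizeof(empty{})) // 16 0
}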
continue } - // For each non-generic function, search for any function calls using - // generic function instantiations. (We don't yet handle generic - // function instantiations that are not immediately called.) - // Then create the needed instantiated function if it hasn't been - // created yet, and change to calling that function directly. - f := decl.(*ir.Func) + // For all non-generic code, search for any function calls using + // generic function instantiations. Then create the needed + // instantiated function if it hasn't been created yet, and change + // to calling that function directly. modified := false - ir.VisitList(f.Body, func(n ir.Node) { - if n.Op() != ir.OCALLFUNC || n.(*ir.CallExpr).X.Op() != ir.OFUNCINST { + foundFuncInst := false + ir.Visit(decl, func(n ir.Node) { + if n.Op() == ir.OFUNCINST { + // We found a function instantiation that is not + // immediately called. + foundFuncInst = true + } + if n.Op() != ir.OCALL || n.(*ir.CallExpr).X.Op() != ir.OFUNCINST { return } // We have found a function call using a generic function // instantiation. call := n.(*ir.CallExpr) inst := call.X.(*ir.InstExpr) - sym := makeInstName(inst) - //fmt.Printf("Found generic func call in %v to %v\n", f, s) - st := g.target.Stencils[sym] - if st == nil { - // If instantiation doesn't exist yet, create it and add - // to the list of decls. - st = genericSubst(sym, inst) - g.target.Stencils[sym] = st - g.target.Decls = append(g.target.Decls, st) - if base.Flag.W > 1 { - ir.Dump(fmt.Sprintf("\nstenciled %v", st), st) - } - } + st := g.getInstantiationForNode(inst) // Replace the OFUNCINST with a direct reference to the // new stenciled function call.X = st.Nname @@ -74,110 +101,223 @@ func (g *irgen) stencil() { copy(withRecv[1:], call.Args) call.Args = withRecv } + // Transform the Call now, which changes OCALL + // to OCALLFUNC and does typecheckaste/assignconvfn. + transformCall(call) modified = true }) + + // If we found an OFUNCINST without a corresponding call in the + // above decl, then traverse the nodes of decl again (with + // EditChildren rather than Visit), where we actually change the + // OFUNCINST node to an ONAME for the instantiated function. + // EditChildren is more expensive than Visit, so we only do this + // in the infrequent case of an OFUNCINST without a corresponding + // call. + if foundFuncInst { + var edit func(ir.Node) ir.Node + edit = func(x ir.Node) ir.Node { + if x.Op() == ir.OFUNCINST { + st := g.getInstantiationForNode(x.(*ir.InstExpr)) + return st.Nname + } + ir.EditChildren(x, edit) + return x + } + edit(decl) + } if base.Flag.W > 1 && modified { ir.Dump(fmt.Sprintf("\nmodified %v", decl), decl) } + ir.CurFunc = nil + // We may have seen new fully-instantiated generic types while + // instantiating any needed functions/methods in the above + // function. If so, instantiate all the methods of those types + // (which will then lead to more functions/methods to scan in the loop). + g.instantiateMethods() } } -// makeInstName makes the unique name for a stenciled generic function, based on -// the name of the function and the types of the type params. -func makeInstName(inst *ir.InstExpr) *types.Sym { - b := bytes.NewBufferString("#") +// instantiateMethods instantiates all the methods of all fully-instantiated +// generic types that have been added to g.instTypeList.
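instantiateMethods is easiest to follow with the value[T] type that the naming comments further below use as their example. A hypothetical source sketch of what lands on g.instTypeList and which methods get stenciled:

package main

type value[T any] struct {
	val T
}

func (v *value[T]) set(x T) { v.val = x }
func (v *value[T]) get() T  { return v.val }

func main() {
	// Using value[int] puts the fully-instantiated type on
	// g.instTypeList; instantiateMethods then stencils
	// (*value[int]).set and (*value[int]).get, which may in turn
	// reference more generic code and grow the worklist.
	var v value[int]
	v.set(42)
	println(v.get())
}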
+func (g *irgen) instantiateMethods() { + for i := 0; i < len(g.instTypeList); i++ { + typ := g.instTypeList[i] + // Get the base generic type by looking up the symbol of the + // generic (uninstantiated) name. + baseSym := typ.Sym().Pkg.Lookup(genericTypeName(typ.Sym())) + baseType := baseSym.Def.(*ir.Name).Type() + for j, m := range typ.Methods().Slice() { + name := m.Nname.(*ir.Name) + targs := make([]ir.Node, len(typ.RParams())) + for k, targ := range typ.RParams() { + targs[k] = ir.TypeNode(targ) + } + baseNname := baseType.Methods().Slice()[j].Nname.(*ir.Name) + name.Func = g.getInstantiation(baseNname, targs, true) + } + } + g.instTypeList = nil + +} + +// genericTypeName returns the name of the base generic type for the type named by +// sym. It simply returns the name obtained by removing everything after the +// first bracket ("["). +func genericTypeName(sym *types.Sym) string { + return sym.Name[0:strings.Index(sym.Name, "[")] +} + +// getInstantiationForNode returns the function/method instantiation for an +// InstExpr node inst. +func (g *irgen) getInstantiationForNode(inst *ir.InstExpr) *ir.Func { if meth, ok := inst.X.(*ir.SelectorExpr); ok { - // Write the name of the generic method, including receiver type - b.WriteString(meth.Selection.Nname.Sym().Name) + return g.getInstantiation(meth.Selection.Nname.(*ir.Name), inst.Targs, true) } else { - b.WriteString(inst.X.(*ir.Name).Name().Sym().Name) + return g.getInstantiation(inst.X.(*ir.Name), inst.Targs, false) + } +} + +// getInstantiation gets the instantiation of the function or method nameNode +// with the type arguments targs. If the instantiated function is not already +// cached, then it calls genericSubst to create the new instantiation. +func (g *irgen) getInstantiation(nameNode *ir.Name, targs []ir.Node, isMeth bool) *ir.Func { + sym := makeInstName(nameNode.Sym(), targs, isMeth) + st := g.target.Stencils[sym] + if st == nil { + // If instantiation doesn't exist yet, create it and add + // to the list of decls. + st = g.genericSubst(sym, nameNode, targs, isMeth) + g.target.Stencils[sym] = st + g.target.Decls = append(g.target.Decls, st) + if base.Flag.W > 1 { + ir.Dump(fmt.Sprintf("\nstenciled %v", st), st) + } + } + return st +} + +// makeInstName makes the unique name for a stenciled generic function or method, +// based on the name of the function fnsym and the targs. It replaces any +// existing bracket type list in the name. makeInstName asserts that fnsym has +// brackets in its name if and only if hasBrackets is true. +// TODO(danscales): remove the assertions and the hasBrackets argument later. +// +// Names of declared generic functions have no brackets originally, so hasBrackets +// should be false. Names of generic methods already have brackets, since the new +// type parameter is specified in the generic type of the receiver (e.g. func +// (v *value[T]) set(...) { ... } has the original name (*value[T]).set).
+// +// The standard naming is something like: 'genFn[int,bool]' for functions and +// '(*genType[int,bool]).methodName' for methods +func makeInstName(fnsym *types.Sym, targs []ir.Node, hasBrackets bool) *types.Sym { + b := bytes.NewBufferString("") + name := fnsym.Name + i := strings.Index(name, "[") + assert(hasBrackets == (i >= 0)) + if i >= 0 { + b.WriteString(name[0:i]) + } else { + b.WriteString(name) } b.WriteString("[") - for i, targ := range inst.Targs { + for i, targ := range targs { if i > 0 { b.WriteString(",") } b.WriteString(targ.Type().String()) } b.WriteString("]") + if i >= 0 { + i2 := strings.Index(name[i:], "]") + assert(i2 >= 0) + b.WriteString(name[i+i2+1:]) + } return typecheck.Lookup(b.String()) } // Struct containing info needed for doing the substitution as we create the // instantiation of a generic function with specified type arguments. type subster struct { - newf *ir.Func // Func node for the new stenciled function - tparams []*types.Field - targs []ir.Node + g *irgen + isMethod bool // If a method is being instantiated + newf *ir.Func // Func node for the new stenciled function + tparams []*types.Field + targs []ir.Node // The substitution map from name nodes in the generic function to the // name nodes in the new stenciled function. vars map[*ir.Name]*ir.Name - seen map[*types.Type]*types.Type } -// genericSubst returns a new function with the specified name. The function is an -// instantiation of a generic function or method with type params, as specified by -// inst. For a method with a generic receiver, it returns an instantiated function -// type where the receiver becomes the first parameter. Otherwise the instantiated -// method would still need to be transformed by later compiler phases. -func genericSubst(name *types.Sym, inst *ir.InstExpr) *ir.Func { - var nameNode *ir.Name +// genericSubst returns a new function with name newsym. The function is an +// instantiation of a generic function or method specified by namedNode with type +// args targs. For a method with a generic receiver, it returns an instantiated +// function type where the receiver becomes the first parameter. Otherwise the +// instantiated method would still need to be transformed by later compiler +// phases. +func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, targs []ir.Node, isMethod bool) *ir.Func { var tparams []*types.Field - if selExpr, ok := inst.X.(*ir.SelectorExpr); ok { + if isMethod { // Get the type params from the method receiver (after skipping // over any pointer) - nameNode = ir.AsNode(selExpr.Selection.Nname).(*ir.Name) - recvType := selExpr.Type().Recv().Type - if recvType.IsPtr() { - recvType = recvType.Elem() - } - tparams = make([]*types.Field, len(recvType.RParams)) - for i, rparam := range recvType.RParams { + recvType := nameNode.Type().Recv().Type + recvType = deref(recvType) + tparams = make([]*types.Field, len(recvType.RParams())) + for i, rparam := range recvType.RParams() { tparams[i] = types.NewField(src.NoXPos, nil, rparam) } } else { - nameNode = inst.X.(*ir.Name) tparams = nameNode.Type().TParams().Fields().Slice() } gf := nameNode.Func - newf := ir.NewFunc(inst.Pos()) - newf.Nname = ir.NewNameAt(inst.Pos(), name) + // Pos of the instantiated function is same as the generic function + newf := ir.NewFunc(gf.Pos()) + newf.Pragma = gf.Pragma // copy over pragmas from generic function to stenciled implementation. 
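For orientation, the receiver-to-parameter rewrite that genericSubst performs corresponds to a source-level method like this, mirroring the (*value[T]).set example from the comment above:

```go
// A generic type with a pointer-receiver method. Stenciling set for
// T=int produces something that behaves like a plain function whose
// first parameter is the receiver: roughly func set(v *value[int], x int).
type value[T any] struct{ x T }

func (v *value[T]) set(x T) { v.x = x }
```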
+ newf.Nname = ir.NewNameAt(gf.Pos(), newsym) newf.Nname.Func = newf newf.Nname.Defn = newf - name.Def = newf.Nname + newsym.Def = newf.Nname + savef := ir.CurFunc + // transformCall/transformReturn (called during stenciling of the body) + // depend on ir.CurFunc being set. + ir.CurFunc = newf + + assert(len(tparams) == len(targs)) subst := &subster{ - newf: newf, - tparams: tparams, - targs: inst.Targs, - vars: make(map[*ir.Name]*ir.Name), - seen: make(map[*types.Type]*types.Type), + g: g, + isMethod: isMethod, + newf: newf, + tparams: tparams, + targs: targs, + vars: make(map[*ir.Name]*ir.Name), } newf.Dcl = make([]*ir.Name, len(gf.Dcl)) for i, n := range gf.Dcl { newf.Dcl[i] = subst.node(n).(*ir.Name) } - newf.Body = subst.list(gf.Body) // Ugly: we have to insert the Name nodes of the parameters/results into // the function type. The current function type has no Nname fields set, // because it came via conversion from the types2 type. - oldt := inst.X.Type() + oldt := nameNode.Type() // We also transform a generic method type to the corresponding // instantiated function type where the receiver is the first parameter. newt := types.NewSignature(oldt.Pkg(), nil, nil, subst.fields(ir.PPARAM, append(oldt.Recvs().FieldSlice(), oldt.Params().FieldSlice()...), newf.Dcl), subst.fields(ir.PPARAMOUT, oldt.Results().FieldSlice(), newf.Dcl)) - newf.Nname.Ntype = ir.TypeNode(newt) newf.Nname.SetType(newt) ir.MarkFunc(newf.Nname) newf.SetTypecheck(1) newf.Nname.SetTypecheck(1) - // TODO(danscales) - remove later, but avoid confusion for now. - newf.Pragma = ir.Noinline + + // Make sure name/type of newf is set before substituting the body. + newf.Body = subst.list(gf.Body) + ir.CurFunc = savef + return newf } @@ -198,9 +338,17 @@ func (subst *subster) node(n ir.Node) ir.Node { return v } m := ir.NewNameAt(name.Pos(), name.Sym()) + if name.IsClosureVar() { + m.SetIsClosureVar(true) + } t := x.Type() - newt := subst.typ(t) - m.SetType(newt) + if t == nil { + assert(name.BuiltinOp != 0) + } else { + newt := subst.typ(t) + m.SetType(newt) + } + m.BuiltinOp = name.BuiltinOp m.Curfn = subst.newf m.Class = name.Class m.Func = name.Func @@ -219,65 +367,180 @@ func (subst *subster) node(n ir.Node) ir.Node { // t can be nil only if this is a call that has no // return values, so allow that and otherwise give // an error. - if _, isCallExpr := m.(*ir.CallExpr); !isCallExpr { + _, isCallExpr := m.(*ir.CallExpr) + _, isStructKeyExpr := m.(*ir.StructKeyExpr) + if !isCallExpr && !isStructKeyExpr && x.Op() != ir.OPANIC && + x.Op() != ir.OCLOSE { base.Fatalf(fmt.Sprintf("Nil type for %v", x)) } - } else { + } else if x.Op() != ir.OCLOSURE { m.SetType(subst.typ(x.Type())) } } ir.EditChildren(m, edit) - if x.Op() == ir.OXDOT { - // A method value/call via a type param will have been left as an - // OXDOT. When we see this during stenciling, finish the - // typechecking, now that we have the instantiated receiver type. - // We need to do this now, since the access/selection to the - // method for the real type is very different from the selection - // for the type param. - m.SetTypecheck(0) - // m will transform to an OCALLPART - typecheck.Expr(m) - } - if x.Op() == ir.OCALL { - call := m.(*ir.CallExpr) - if call.X.Op() == ir.OTYPE { - // Do typechecking on a conversion, now that we - // know the type argument. - m.SetTypecheck(0) - m = typecheck.Expr(m) - } else if call.X.Op() == ir.OCALLPART { - // Redo the typechecking, now that we know the method - // value is being called. 
- call.X.(*ir.SelectorExpr).SetOp(ir.OXDOT) - call.X.SetTypecheck(0) - call.X.SetType(nil) - typecheck.Callee(call.X) - m.SetTypecheck(0) - typecheck.Call(m.(*ir.CallExpr)) + if x.Typecheck() == 3 { + // These are nodes whose transforms were delayed until + // their instantiated type was known. + m.SetTypecheck(1) + if typecheck.IsCmp(x.Op()) { + transformCompare(m.(*ir.BinaryExpr)) } else { - base.FatalfAt(call.Pos(), "Expecting OCALLPART or OTYPE with CALL") + switch x.Op() { + case ir.OSLICE, ir.OSLICE3: + transformSlice(m.(*ir.SliceExpr)) + + case ir.OADD: + m = transformAdd(m.(*ir.BinaryExpr)) + + case ir.OINDEX: + transformIndex(m.(*ir.IndexExpr)) + + case ir.OAS2: + as2 := m.(*ir.AssignListStmt) + transformAssign(as2, as2.Lhs, as2.Rhs) + + case ir.OAS: + as := m.(*ir.AssignStmt) + lhs, rhs := []ir.Node{as.X}, []ir.Node{as.Y} + transformAssign(as, lhs, rhs) + + case ir.OASOP: + as := m.(*ir.AssignOpStmt) + transformCheckAssign(as, as.X) + + case ir.ORETURN: + transformReturn(m.(*ir.ReturnStmt)) + + case ir.OSEND: + transformSend(m.(*ir.SendStmt)) + + default: + base.Fatalf("Unexpected node with Typecheck() == 3") + } } } - if x.Op() == ir.OCLOSURE { + switch x.Op() { + case ir.OLITERAL: + t := m.Type() + if t != x.Type() { + // types2 will give us a constant with a type T, + // if an untyped constant is used with another + // operand of type T (in a provably correct way). + // When we substitute in the type args during + // stenciling, we now know the real type of the + // constant. We may then need to change the + // BasicLit.val to be the correct type (e.g. + // convert an int64Val constant to a floatVal + // constant). + m.SetType(types.UntypedInt) // use any untyped type for DefaultLit to work + m = typecheck.DefaultLit(m, t) + } + + case ir.OXDOT: + // A method value/call via a type param will have been + // left as an OXDOT. When we see this during stenciling, + // finish the transformation, now that we have the + // instantiated receiver type. We need to do this now, + // since the access/selection to the method for the real + // type is very different from the selection for the type + // param. m will be transformed to an OCALLPART node. It + // will be transformed to an ODOTMETH or ODOTINTER node if + // we find in the OCALL case below that the method value + // is actually called. + transformDot(m.(*ir.SelectorExpr), false) + m.SetTypecheck(1) + + case ir.OCALL: + call := m.(*ir.CallExpr) + switch call.X.Op() { + case ir.OTYPE: + // Transform the conversion, now that we know the + // type argument. + m = transformConvCall(m.(*ir.CallExpr)) + + case ir.OCALLPART: + // Redo the transformation of OXDOT, now that we + // know the method value is being called. Then + // transform the call. + call.X.(*ir.SelectorExpr).SetOp(ir.OXDOT) + transformDot(call.X.(*ir.SelectorExpr), true) + transformCall(call) + + case ir.ODOT, ir.ODOTPTR: + // An OXDOT for a generic receiver was resolved to + // an access to a field which has a function + // value. Transform the call to that function, now + // that the OXDOT was resolved. + transformCall(call) + + case ir.ONAME: + name := call.X.Name() + if name.BuiltinOp != ir.OXXX { + switch name.BuiltinOp { + case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OLEN, ir.OCAP, ir.OAPPEND: + // Transform these builtins now that we + // know the type of the args. 
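The Typecheck() == 3 dispatch earlier in this hunk is driven by expressions like these, whose rewrite depends on the instantiated type; an illustrative sketch, not test code from this CL:

```go
// Expressions inside a generic body whose transformation is delayed
// until the concrete type argument is known:
func eq[T comparable](a, b T) bool { return a == b } // comparison transform
func get[T any](s []T, i int) T    { return s[i] }   // OINDEX: slice vs. map vs. string
func cut[T any](s []T) []T         { return s[1:] }  // OSLICE: operand kind decides the op
```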
+ m = transformBuiltin(call) + default: + base.FatalfAt(call.Pos(), "Unexpected builtin op") + } + } else { + // This is the case of a function value that was a + // type parameter (implied to be a function via a + // structural constraint) which is now resolved. + transformCall(call) + } + + case ir.OCLOSURE: + transformCall(call) + + case ir.OFUNCINST: + // A call with an OFUNCINST will get transformed + // in stencil() once we have created & attached the + // instantiation to be called. + + default: + base.FatalfAt(call.Pos(), fmt.Sprintf("Unexpected op with CALL during stenciling: %v", call.X.Op())) + } + + case ir.OCLOSURE: x := x.(*ir.ClosureExpr) - // Need to save/duplicate x.Func.Nname, - // x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and + // Need to duplicate x.Func.Nname, x.Func.Dcl, x.Func.ClosureVars, and // x.Func.Body. oldfn := x.Func newfn := ir.NewFunc(oldfn.Pos()) if oldfn.ClosureCalled() { newfn.SetClosureCalled(true) } + newfn.SetIsHiddenClosure(true) m.(*ir.ClosureExpr).Func = newfn - newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym()) - newfn.Nname.SetType(oldfn.Nname.Type()) - newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype) + // Closure name can already have brackets, if it derives + // from a generic method + newsym := makeInstName(oldfn.Nname.Sym(), subst.targs, subst.isMethod) + newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), newsym) + newfn.Nname.Func = newfn + newfn.Nname.Defn = newfn + ir.MarkFunc(newfn.Nname) + newfn.OClosure = m.(*ir.ClosureExpr) + + saveNewf := subst.newf + ir.CurFunc = newfn + subst.newf = newfn + newfn.Dcl = subst.namelist(oldfn.Dcl) + newfn.ClosureVars = subst.namelist(oldfn.ClosureVars) + + typed(subst.typ(oldfn.Nname.Type()), newfn.Nname) + typed(newfn.Nname.Type(), m) + newfn.SetTypecheck(1) + + // Make sure type of closure function is set before doing body. newfn.Body = subst.list(oldfn.Body) - // Make shallow copy of the Dcl and ClosureVar slices - newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...) - newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...) + subst.newf = saveNewf + ir.CurFunc = saveNewf + + subst.g.target.Decls = append(subst.g.target.Decls, newfn) } return m } @@ -285,6 +548,20 @@ func (subst *subster) node(n ir.Node) ir.Node { return edit(n) } +func (subst *subster) namelist(l []*ir.Name) []*ir.Name { + s := make([]*ir.Name, len(l)) + for i, n := range l { + s[i] = subst.node(n).(*ir.Name) + if n.Defn != nil { + s[i].Defn = subst.node(n.Defn) + } + if n.Outer != nil { + s[i].Outer = subst.node(n.Outer).(*ir.Name) + } + } + return s +} + func (subst *subster) list(l []ir.Node) []ir.Node { s := make([]ir.Node, len(l)) for i, n := range l { @@ -293,22 +570,50 @@ func (subst *subster) list(l []ir.Node) []ir.Node { return s } -// tstruct substitutes type params in a structure type -func (subst *subster) tstruct(t *types.Type) *types.Type { +// tstruct substitutes type params in types of the fields of a structure type. For +// each field, if Nname is set, tstruct also translates the Nname using +// subst.vars, if Nname is in subst.vars. To always force the creation of a new +// (top-level) struct, regardless of whether anything changed with the types or +// names of the struct's fields, set force to true. +func (subst *subster) tstruct(t *types.Type, force bool) *types.Type { if t.NumFields() == 0 { + if t.HasTParam() { + // For an empty struct, we need to return a new type, + // since it may now be fully instantiated (HasTParam + // becomes false). 
+ return types.NewStruct(t.Pkg(), nil) + } return t } var newfields []*types.Field + if force { + newfields = make([]*types.Field, t.NumFields()) + } for i, f := range t.Fields().Slice() { t2 := subst.typ(f.Type) - if t2 != f.Type && newfields == nil { + if (t2 != f.Type || f.Nname != nil) && newfields == nil { newfields = make([]*types.Field, t.NumFields()) for j := 0; j < i; j++ { newfields[j] = t.Field(j) } } if newfields != nil { + // TODO(danscales): make sure this works for the field + // names of embedded types (which should keep the name of + // the type param, not the instantiated type). newfields[i] = types.NewField(f.Pos, f.Sym, t2) + if f.Nname != nil { + // f.Nname may not be in subst.vars[] if this is + // a function name or a function instantiation type + // that we are translating + v := subst.vars[f.Nname.(*ir.Name)] + // Be careful not to put a nil var into Nname, + // since Nname is an interface, so it would be a + // non-nil interface. + if v != nil { + newfields[i].Nname = v + } + } } } if newfields != nil { @@ -318,15 +623,40 @@ func (subst *subster) tstruct(t *types.Type) *types.Type { } -// instTypeName creates a name for an instantiated type, based on the type args -func instTypeName(name string, targs []ir.Node) string { +// tinter substitutes type params in types of the methods of an interface type. +func (subst *subster) tinter(t *types.Type) *types.Type { + if t.Methods().Len() == 0 { + return t + } + var newfields []*types.Field + for i, f := range t.Methods().Slice() { + t2 := subst.typ(f.Type) + if (t2 != f.Type || f.Nname != nil) && newfields == nil { + newfields = make([]*types.Field, t.Methods().Len()) + for j := 0; j < i; j++ { + newfields[j] = t.Methods().Index(j) + } + } + if newfields != nil { + newfields[i] = types.NewField(f.Pos, f.Sym, t2) + } + } + if newfields != nil { + return types.NewInterface(t.Pkg(), newfields) + } + return t +} + +// instTypeName creates a name for an instantiated type, based on the name of the +// generic type and the type args +func instTypeName(name string, targs []*types.Type) string { b := bytes.NewBufferString(name) b.WriteByte('[') for i, targ := range targs { if i > 0 { b.WriteByte(',') } - b.WriteString(targ.Type().String()) + b.WriteString(targ.String()) } b.WriteByte(']') return b.String() @@ -334,27 +664,85 @@ func instTypeName(name string, targs []ir.Node) string { // typ computes the type obtained by substituting any type parameter in t with the // corresponding type argument in subst. If t contains no type parameters, the -// result is t; otherwise the result is a new type. -// It deals with recursive types by using a map and TFORW types. -// TODO(danscales) deal with recursion besides ptr/struct cases. +// result is t; otherwise the result is a new type. It deals with recursive types +// by using TFORW types and finding partially or fully created types via sym.Def. func (subst *subster) typ(t *types.Type) *types.Type { - if !t.HasTParam() { + if !t.HasTParam() && t.Kind() != types.TFUNC { + // Note: function types need to be copied regardless, as the + // types of closures may contain declarations that need + // to be copied. See #45738. 
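A hedged illustration of the situation behind the #45738 note above: a closure declared in a generic body carries parameter declarations of its own, so even its function type must be copied per instantiation:

```go
// apply is illustrative only. The closure g has its own parameter
// declaration (v T), which must be duplicated for each instantiation
// of apply, along with the closure's function type.
func apply[T any](x T, f func(T) T) T {
	g := func(v T) T { return f(v) }
	return g(x)
}
```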
return t } - if subst.seen[t] != nil { - // We've hit a recursive type - return subst.seen[t] - } - var newt *types.Type - switch t.Kind() { - case types.TTYPEPARAM: + if t.Kind() == types.TTYPEPARAM { for i, tp := range subst.tparams { if tp.Type == t { return subst.targs[i].Type() } } - return t + // If t is a simple typeparam T, then t has the name/symbol 'T' + // and t.Underlying() == t. + // + // However, consider the type definition: 'type P[T any] T'. We + // might use this definition so we can have a variant of type T + // that we can add new methods to. Suppose t is a reference to + // P[T]. t has the name 'P[T]', but its kind is TTYPEPARAM, + // because P[T] is defined as T. If we look at t.Underlying(), it + // is different, because the name of t.Underlying() is 'T' rather + // than 'P[T]'. But the kind of t.Underlying() is also TTYPEPARAM. + // In this case, we do the needed recursive substitution in the + // case statement below. + if t.Underlying() == t { + // t is a simple typeparam that didn't match anything in tparam + return t + } + // t is a more complex typeparam (e.g. P[T], as above, whose + // definition is just T). + assert(t.Sym() != nil) + } + + var newsym *types.Sym + var neededTargs []*types.Type + var forw *types.Type + + if t.Sym() != nil { + // Translate the type params for this type according to + // the tparam/targs mapping from subst. + neededTargs = make([]*types.Type, len(t.RParams())) + for i, rparam := range t.RParams() { + neededTargs[i] = subst.typ(rparam) + } + // For a named (defined) type, we have to change the name of the + // type as well. We do this first, so we can look up if we've + // already seen this type during this substitution or other + // definitions/substitutions. + genName := genericTypeName(t.Sym()) + newsym = t.Sym().Pkg.Lookup(instTypeName(genName, neededTargs)) + if newsym.Def != nil { + // We've already created this instantiated defined type. + return newsym.Def.Type() + } + + // In order to deal with recursive generic types, create a TFORW + // type initially and set the Def field of its sym, so it can be + // found if this type appears recursively within the type. + forw = newIncompleteNamedType(t.Pos(), newsym) + //println("Creating new type by sub", newsym.Name, forw.HasTParam()) + forw.SetRParams(neededTargs) + } + + var newt *types.Type + + switch t.Kind() { + case types.TTYPEPARAM: + if t.Sym() == newsym { + // The substitution did not change the type. + return t + } + // Substitute the underlying typeparam (e.g. T in P[T], see + // the example describing type P[T] above). + newt = subst.typ(t.Underlying()) + assert(newt != t) case types.TARRAY: elem := t.Elem() @@ -365,17 +753,10 @@ func (subst *subster) typ(t *types.Type) *types.Type { case types.TPTR: elem := t.Elem() - // In order to deal with recursive generic types, create a TFORW - // type initially and store it in the seen map, so it can be - // accessed if this type appears recursively within the type. 
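Two source forms the forwarding-type machinery here must handle; List matches the shape used in the generics tests, and a definition like P[T any] T was accepted by types2 at the time of this CL, though later Go releases disallow a bare type parameter as the right-hand side:

```go
// A recursive generic type: while substituting T=int, the field type
// *List[int] refers back to the List[int] type that is still under
// construction, which the forwarding (TFORW) type makes safe.
type List[T any] struct {
	next *List[T]
	val  T
}

// The "more complex typeparam" case discussed above: P[T]'s underlying
// type is the type parameter T itself (historical syntax; see note).
type P[T any] T
```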
- forw := types.New(types.TFORW) - subst.seen[t] = forw newelem := subst.typ(elem) if newelem != elem { - forw.SetUnderlying(types.NewPtr(newelem)) - newt = forw + newt = types.NewPtr(newelem) } - delete(subst.seen, t) case types.TSLICE: elem := t.Elem() @@ -385,62 +766,113 @@ } case types.TSTRUCT: - forw := types.New(types.TFORW) - subst.seen[t] = forw - newt = subst.tstruct(t) - if newt != t { - forw.SetUnderlying(newt) - newt = forw + newt = subst.tstruct(t, false) + if newt == t { + newt = nil } - delete(subst.seen, t) case types.TFUNC: - newrecvs := subst.tstruct(t.Recvs()) - newparams := subst.tstruct(t.Params()) - newresults := subst.tstruct(t.Results()) + newrecvs := subst.tstruct(t.Recvs(), false) + newparams := subst.tstruct(t.Params(), false) + newresults := subst.tstruct(t.Results(), false) if newrecvs != t.Recvs() || newparams != t.Params() || newresults != t.Results() { + // If any types have changed, then all the fields + // of recv, params, and results must be copied, because they have + // offset fields that are dependent, and so must have an + // independent copy for each new signature. var newrecv *types.Field if newrecvs.NumFields() > 0 { + if newrecvs == t.Recvs() { + newrecvs = subst.tstruct(t.Recvs(), true) + } newrecv = newrecvs.Field(0) } - newt = types.NewSignature(t.Pkg(), newrecv, nil, newparams.FieldSlice(), newresults.FieldSlice()) + if newparams == t.Params() { + newparams = subst.tstruct(t.Params(), true) + } + if newresults == t.Results() { + newresults = subst.tstruct(t.Results(), true) + } + newt = types.NewSignature(t.Pkg(), newrecv, t.TParams().FieldSlice(), newparams.FieldSlice(), newresults.FieldSlice()) } - // TODO: case TCHAN - // TODO: case TMAP - // TODO: case TINTER - } - if newt != nil { - if t.Sym() != nil { - // Since we've substituted types, we also need to change - // the defined name of the type, by removing the old types - // (in brackets) from the name, and adding the new types. - oldname := t.Sym().Name - i := strings.Index(oldname, "[") - oldname = oldname[:i] - sym := t.Sym().Pkg.Lookup(instTypeName(oldname, subst.targs)) - if sym.Def != nil { - // We've already created this instantiated defined type. - return sym.Def.Type() - } - newt.SetSym(sym) - sym.Def = ir.TypeNode(newt) + case types.TINTER: + newt = subst.tinter(t) + if newt == t { + newt = nil } + + case types.TMAP: + newkey := subst.typ(t.Key()) + newval := subst.typ(t.Elem()) + if newkey != t.Key() || newval != t.Elem() { + newt = types.NewMap(newkey, newval) + } + + case types.TCHAN: + elem := t.Elem() + newelem := subst.typ(elem) + if newelem != elem { + newt = types.NewChan(newelem, t.ChanDir()) + if !newt.HasTParam() { + // TODO(danscales): not sure why I have to do this + // only for channels..... + types.CheckSize(newt) + } + } + } + if newt == nil { + // Even though there were typeparams in the type, there may be no + // change if this is a function type for a function call (which will + // have its own tparams/targs in the function instantiation). + return t + } + + if t.Sym() == nil { + // Not a named type, so there was no forwarding type and there are + // no methods to substitute. + assert(t.Methods().Len() == 0) return newt } - return t + forw.SetUnderlying(newt) + newt = forw + + if t.Kind() != types.TINTER && t.Methods().Len() > 0 { + // Fill in the method info for the new type.
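The share-unless-changed pattern used by all of these cases can be seen in isolation in this toy model; every name here is illustrative, none of it is a compiler API:

```go
package main

import "fmt"

// Type is a miniature stand-in for *types.Type.
type Type interface{ isType() }

type Param struct{ Name string } // a type parameter, like TTYPEPARAM
type Slice struct{ Elem Type }
type Ptr struct{ Elem Type }
type Basic struct{ Name string }

func (Param) isType() {}
func (Slice) isType() {}
func (Ptr) isType()   {}
func (Basic) isType() {}

// subst mirrors the structure of subster.typ: allocate a new node only
// when a component actually changed, so fully concrete types are shared.
func subst(t Type, m map[string]Type) Type {
	switch t := t.(type) {
	case Param:
		if r, ok := m[t.Name]; ok {
			return r
		}
	case Slice:
		if e := subst(t.Elem, m); e != t.Elem {
			return Slice{Elem: e}
		}
	case Ptr:
		if e := subst(t.Elem, m); e != t.Elem {
			return Ptr{Elem: e}
		}
	}
	return t
}

func main() {
	tparams := map[string]Type{"T": Basic{Name: "int"}}
	// Substituting T=int in *[]T yields a new *[]int node; an input with
	// no type params would be returned unchanged.
	fmt.Println(subst(Ptr{Elem: Slice{Elem: Param{Name: "T"}}}, tparams))
}
```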
+ var newfields []*types.Field + newfields = make([]*types.Field, t.Methods().Len()) + for i, f := range t.Methods().Slice() { + t2 := subst.typ(f.Type) + oldsym := f.Nname.Sym() + newsym := makeInstName(oldsym, subst.targs, true) + var nname *ir.Name + if newsym.Def != nil { + nname = newsym.Def.(*ir.Name) + } else { + nname = ir.NewNameAt(f.Pos, newsym) + nname.SetType(t2) + newsym.Def = nname + } + newfields[i] = types.NewField(f.Pos, f.Sym, t2) + newfields[i].Nname = nname + } + newt.Methods().Set(newfields) + if !newt.HasTParam() { + // Generate all the methods for a new fully-instantiated type. + subst.g.instTypeList = append(subst.g.instTypeList, newt) + } + } + return newt } // fields sets the Nname field for the Field nodes inside a type signature, based // on the corresponding in/out parameters in dcl. It depends on the in and out // parameters being in order in dcl. func (subst *subster) fields(class ir.Class, oldfields []*types.Field, dcl []*ir.Name) []*types.Field { - newfields := make([]*types.Field, len(oldfields)) - var i int - // Find the starting index in dcl of declarations of the class (either // PPARAM or PPARAMOUT). + var i int for i = range dcl { if dcl[i].Class == class { break @@ -450,11 +882,36 @@ func (subst *subster) fields(class ir.Class, oldfields []*types.Field, dcl []*ir // Create newfields nodes that are copies of the oldfields nodes, but // with substitution for any type params, and with Nname set to be the node in // Dcl for the corresponding PPARAM or PPARAMOUT. + newfields := make([]*types.Field, len(oldfields)) for j := range oldfields { newfields[j] = oldfields[j].Copy() newfields[j].Type = subst.typ(oldfields[j].Type) - newfields[j].Nname = dcl[i] - i++ + // A param field will be missing from dcl if its name is + // unspecified or specified as "_". So, we compare the dcl sym + // with the field sym. If they don't match, this dcl (if there is + // one left) must apply to a later field. + if i < len(dcl) && dcl[i].Sym() == oldfields[j].Sym { + newfields[j].Nname = dcl[i] + i++ + } } return newfields } + +// deref does a single dereference of type t, if it is a pointer type. +func deref(t *types.Type) *types.Type { + if t.IsPtr() { + return t.Elem() + } + return t +} + +// newIncompleteNamedType returns a TFORW type t with name specified by sym, such +// that t.nod and sym.Def are set correctly. +func newIncompleteNamedType(pos src.XPos, sym *types.Sym) *types.Type { + name := ir.NewDeclNameAt(pos, ir.OTYPE, sym) + forw := types.NewNamed(name) + name.SetType(forw) + sym.Def = name + return forw +} diff --git a/src/cmd/compile/internal/noder/stmt.go b/src/cmd/compile/internal/noder/stmt.go index 1775116f41c..32a1483b4aa 100644 --- a/src/cmd/compile/internal/noder/stmt.go +++ b/src/cmd/compile/internal/noder/stmt.go @@ -27,11 +27,6 @@ func (g *irgen) stmts(stmts []syntax.Stmt) []ir.Node { } func (g *irgen) stmt(stmt syntax.Stmt) ir.Node { - // TODO(mdempsky): Remove dependency on typecheck. - return typecheck.Stmt(g.stmt0(stmt)) -} - -func (g *irgen) stmt0(stmt syntax.Stmt) ir.Node { switch stmt := stmt.(type) { case nil, *syntax.EmptyStmt: return nil @@ -46,30 +41,81 @@ func (g *irgen) stmt0(stmt syntax.Stmt) ir.Node { } return x case *syntax.SendStmt: - return ir.NewSendStmt(g.pos(stmt), g.expr(stmt.Chan), g.expr(stmt.Value)) + n := ir.NewSendStmt(g.pos(stmt), g.expr(stmt.Chan), g.expr(stmt.Value)) + if n.Chan.Type().HasTParam() || n.Value.Type().HasTParam() { + // Delay transforming the send if the channel or value + // has a type param.
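The delay marker n.SetTypecheck(3) follows just below; at the source level, the statements being delayed look like this (illustrative only, not test code from this CL):

```go
// Statements in a generic body whose transformation is delayed until
// stenciling, alongside one that is transformed immediately:
func relay[T any](in, out chan T, done chan bool) {
	v := <-in    // assignment: delayed, operand type mentions T
	out <- v     // send: delayed, channel element type is T
	done <- true // send: fully concrete, transformed right away
}
```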
+ n.SetTypecheck(3) + return n + } + transformSend(n) + n.SetTypecheck(1) + return n case *syntax.DeclStmt: return ir.NewBlockStmt(g.pos(stmt), g.decls(stmt.DeclList)) case *syntax.AssignStmt: if stmt.Op != 0 && stmt.Op != syntax.Def { op := g.op(stmt.Op, binOps[:]) + var n *ir.AssignOpStmt if stmt.Rhs == nil { - return IncDec(g.pos(stmt), op, g.expr(stmt.Lhs)) + n = IncDec(g.pos(stmt), op, g.expr(stmt.Lhs)) + } else { + n = ir.NewAssignOpStmt(g.pos(stmt), op, g.expr(stmt.Lhs), g.expr(stmt.Rhs)) } - return ir.NewAssignOpStmt(g.pos(stmt), op, g.expr(stmt.Lhs), g.expr(stmt.Rhs)) + if n.X.Typecheck() == 3 { + n.SetTypecheck(3) + return n + } + transformAsOp(n) + n.SetTypecheck(1) + return n } names, lhs := g.assignList(stmt.Lhs, stmt.Op == syntax.Def) rhs := g.exprList(stmt.Rhs) + // We must delay transforming the assign statement if any of the + // lhs or rhs nodes are also delayed, since transformAssign needs + // to know the types of the left and right sides in various cases. + delay := false + for _, e := range lhs { + if e.Typecheck() == 3 { + delay = true + break + } + } + for _, e := range rhs { + if e.Typecheck() == 3 { + delay = true + break + } + } + if len(lhs) == 1 && len(rhs) == 1 { n := ir.NewAssignStmt(g.pos(stmt), lhs[0], rhs[0]) n.Def = initDefn(n, names) + + if delay { + n.SetTypecheck(3) + return n + } + + lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y} + transformAssign(n, lhs, rhs) + n.X, n.Y = lhs[0], rhs[0] + n.SetTypecheck(1) return n } n := ir.NewAssignListStmt(g.pos(stmt), ir.OAS2, lhs, rhs) n.Def = initDefn(n, names) + if delay { + n.SetTypecheck(3) + return n + } + transformAssign(n, n.Lhs, n.Rhs) + n.SetTypecheck(1) return n case *syntax.BranchStmt: @@ -77,13 +123,27 @@ func (g *irgen) stmt0(stmt syntax.Stmt) ir.Node { case *syntax.CallStmt: return ir.NewGoDeferStmt(g.pos(stmt), g.tokOp(int(stmt.Tok), callOps[:]), g.expr(stmt.Call)) case *syntax.ReturnStmt: - return ir.NewReturnStmt(g.pos(stmt), g.exprList(stmt.Results)) + n := ir.NewReturnStmt(g.pos(stmt), g.exprList(stmt.Results)) + for _, e := range n.Results { + if e.Type().HasTParam() { + // Delay transforming the return statement if any of the + // return values have a type param. + n.SetTypecheck(3) + return n + } + } + transformReturn(n) + n.SetTypecheck(1) + return n case *syntax.IfStmt: return g.ifStmt(stmt) case *syntax.ForStmt: return g.forStmt(stmt) case *syntax.SelectStmt: - return g.selectStmt(stmt) + n := g.selectStmt(stmt) + transformSelect(n.(*ir.SelectStmt)) + n.SetTypecheck(1) + return n case *syntax.SwitchStmt: return g.switchStmt(stmt) diff --git a/src/cmd/compile/internal/noder/transform.go b/src/cmd/compile/internal/noder/transform.go new file mode 100644 index 00000000000..2859089e69b --- /dev/null +++ b/src/cmd/compile/internal/noder/transform.go @@ -0,0 +1,961 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains transformation functions on nodes, which are the +// transformations that the typecheck package does that are distinct from the +// typechecking functionality. 
These transform functions are pared-down copies of +// the original typechecking functions, with all code removed that is related to: +// +// - Detecting compile-time errors (already done by types2) +// - Setting the actual type of existing nodes (already done based on +// type info from types2) +// - Dealing with untyped constants (which types2 has already resolved) +// +// Each of the transformation functions requires that node passed in has its type +// and typecheck flag set. If the transformation function replaces or adds new +// nodes, it will set the type and typecheck flag for those new nodes. + +package noder + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "fmt" + "go/constant" +) + +// Transformation functions for expressions + +// transformAdd transforms an addition operation (currently just addition of +// strings). Corresponds to the "binary operators" case in typecheck.typecheck1. +func transformAdd(n *ir.BinaryExpr) ir.Node { + assert(n.Type() != nil && n.Typecheck() == 1) + l := n.X + if l.Type().IsString() { + var add *ir.AddStringExpr + if l.Op() == ir.OADDSTR { + add = l.(*ir.AddStringExpr) + add.SetPos(n.Pos()) + } else { + add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l}) + } + r := n.Y + if r.Op() == ir.OADDSTR { + r := r.(*ir.AddStringExpr) + add.List.Append(r.List.Take()...) + } else { + add.List.Append(r) + } + typed(l.Type(), add) + return add + } + return n +} + +// Corresponds to typecheck.stringtoruneslit. +func stringtoruneslit(n *ir.ConvExpr) ir.Node { + if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String { + base.Fatalf("stringtoarraylit %v", n) + } + + var list []ir.Node + i := 0 + eltType := n.Type().Elem() + for _, r := range ir.StringVal(n.X) { + elt := ir.NewKeyExpr(base.Pos, ir.NewInt(int64(i)), ir.NewInt(int64(r))) + // Change from untyped int to the actual element type determined + // by types2. No need to change elt.Key, since the array indexes + // are just used for setting up the element ordering. + elt.Value.SetType(eltType) + list = append(list, elt) + i++ + } + + nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()), nil) + nn.List = list + typed(n.Type(), nn) + // Need to transform the OCOMPLIT. + return transformCompLit(nn) +} + +// transformConv transforms an OCONV node as needed, based on the types involved, +// etc. Corresponds to typecheck.tcConv. +func transformConv(n *ir.ConvExpr) ir.Node { + t := n.X.Type() + op, _ := typecheck.Convertop(n.X.Op() == ir.OLITERAL, t, n.Type()) + n.SetOp(op) + switch n.Op() { + case ir.OCONVNOP: + if t.Kind() == n.Type().Kind() { + switch t.Kind() { + case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128: + // Floating point casts imply rounding and + // so the conversion must be kept. + n.SetOp(ir.OCONV) + } + } + + // Do not convert to []byte literal. See CL 125796. + // Generated code and compiler memory footprint is better without it. + case ir.OSTR2BYTES: + // ok + + case ir.OSTR2RUNES: + if n.X.Op() == ir.OLITERAL { + return stringtoruneslit(n) + } + } + return n +} + +// transformConvCall transforms a conversion call. Corresponds to the OTYPE part of +// typecheck.tcCall. +func transformConvCall(n *ir.CallExpr) ir.Node { + assert(n.Type() != nil && n.Typecheck() == 1) + arg := n.Args[0] + n1 := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg) + typed(n.X.Type(), n1) + return transformConv(n1) +} + +// transformCall transforms a normal function/method call. 
Corresponds to last half +// (non-conversion, non-builtin part) of typecheck.tcCall. +func transformCall(n *ir.CallExpr) { + // n.Type() can be nil for calls with no return value + assert(n.Typecheck() == 1) + transformArgs(n) + l := n.X + t := l.Type() + + switch l.Op() { + case ir.ODOTINTER: + n.SetOp(ir.OCALLINTER) + + case ir.ODOTMETH: + l := l.(*ir.SelectorExpr) + n.SetOp(ir.OCALLMETH) + + tp := t.Recv().Type + + if l.X == nil || !types.Identical(l.X.Type(), tp) { + base.Fatalf("method receiver") + } + + default: + n.SetOp(ir.OCALLFUNC) + } + + typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args) + if t.NumResults() == 1 { + n.SetType(l.Type().Results().Field(0).Type) + + if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME { + if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" { + // Emit code for runtime.getg() directly instead of calling function. + // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, + // so that the ordering pass can make sure to preserve the semantics of the original code + // (in particular, the exact time of the function call) by introducing temporaries. + // In this case, we know getg() always returns the same result within a given function + // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. + n.SetOp(ir.OGETG) + } + } + return + } +} + +// transformCompare transforms a compare operation (currently just equals/not +// equals). Corresponds to the "comparison operators" case in +// typecheck.typecheck1, including tcArith. +func transformCompare(n *ir.BinaryExpr) { + assert(n.Type() != nil && n.Typecheck() == 1) + if (n.Op() == ir.OEQ || n.Op() == ir.ONE) && !types.Identical(n.X.Type(), n.Y.Type()) { + // Comparison is okay as long as one side is assignable to the + // other. The only allowed case where the conversion is not CONVNOP is + // "concrete == interface". In that case, check comparability of + // the concrete type. The conversion allocates, so only do it if + // the concrete type is huge. + l, r := n.X, n.Y + lt, rt := l.Type(), r.Type() + converted := false + if rt.Kind() != types.TBLANK { + aop, _ := typecheck.Assignop(lt, rt) + if aop != ir.OXXX { + types.CalcSize(lt) + if rt.IsInterface() == lt.IsInterface() || lt.Width >= 1<<16 { + l = ir.NewConvExpr(base.Pos, aop, rt, l) + l.SetTypecheck(1) + } + + converted = true + } + } + + if !converted && lt.Kind() != types.TBLANK { + aop, _ := typecheck.Assignop(rt, lt) + if aop != ir.OXXX { + types.CalcSize(rt) + if rt.IsInterface() == lt.IsInterface() || rt.Width >= 1<<16 { + r = ir.NewConvExpr(base.Pos, aop, lt, r) + r.SetTypecheck(1) + } + } + } + n.X, n.Y = l, r + } +} + +// Corresponds to typecheck.implicitstar. +func implicitstar(n ir.Node) ir.Node { + // insert implicit * if needed for fixed array + t := n.Type() + if !t.IsPtr() { + return n + } + t = t.Elem() + if !t.IsArray() { + return n + } + star := ir.NewStarExpr(base.Pos, n) + star.SetImplicit(true) + return typed(t, star) +} + +// transformIndex transforms an index operation. Corresponds to typecheck.tcIndex. +func transformIndex(n *ir.IndexExpr) { + assert(n.Type() != nil && n.Typecheck() == 1) + n.X = implicitstar(n.X) + l := n.X + t := l.Type() + if t.Kind() == types.TMAP { + n.Index = assignconvfn(n.Index, t.Key()) + n.SetOp(ir.OINDEXMAP) + // Set type to just the map value, not (value, bool). This is + // different from types2, but fits the later stages of the + // compiler better. 
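The value-only typing described in the comment above, contrasted with the comma-ok form, looks like this at the source level (an illustrative sketch):

```go
// The two result shapes for a map index: types2 types the comma-ok form
// as a 2-element tuple, while ir types the plain OINDEXMAP as just V.
func lookup[K comparable, V any](m map[K]V, k K) (V, bool) {
	v := m[k]      // OINDEXMAP: type is V
	v2, ok := m[k] // becomes OAS2MAPR in transformAssign: (V, bool)
	_ = v
	return v2, ok
}
```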
+ n.SetType(t.Elem()) + n.Assigned = false + } +} + +// transformSlice transforms a slice operation. Corresponds to typecheck.tcSlice. +func transformSlice(n *ir.SliceExpr) { + assert(n.Type() != nil && n.Typecheck() == 1) + l := n.X + if l.Type().IsArray() { + addr := typecheck.NodAddr(n.X) + addr.SetImplicit(true) + typed(types.NewPtr(n.X.Type()), addr) + n.X = addr + l = addr + } + t := l.Type() + if t.IsString() { + n.SetOp(ir.OSLICESTR) + } else if t.IsPtr() && t.Elem().IsArray() { + if n.Op().IsSlice3() { + n.SetOp(ir.OSLICE3ARR) + } else { + n.SetOp(ir.OSLICEARR) + } + } +} + +// Transformation functions for statements + +// Corresponds to typecheck.checkassign. +func transformCheckAssign(stmt ir.Node, n ir.Node) { + if n.Op() == ir.OINDEXMAP { + n := n.(*ir.IndexExpr) + n.Assigned = true + return + } +} + +// Corresponds to typecheck.assign. +func transformAssign(stmt ir.Node, lhs, rhs []ir.Node) { + checkLHS := func(i int, typ *types.Type) { + transformCheckAssign(stmt, lhs[i]) + } + + cr := len(rhs) + if len(rhs) == 1 { + if rtyp := rhs[0].Type(); rtyp != nil && rtyp.IsFuncArgStruct() { + cr = rtyp.NumFields() + } + } + + // x, ok = y +assignOK: + for len(lhs) == 2 && cr == 1 { + stmt := stmt.(*ir.AssignListStmt) + r := rhs[0] + + switch r.Op() { + case ir.OINDEXMAP: + stmt.SetOp(ir.OAS2MAPR) + case ir.ORECV: + stmt.SetOp(ir.OAS2RECV) + case ir.ODOTTYPE: + r := r.(*ir.TypeAssertExpr) + stmt.SetOp(ir.OAS2DOTTYPE) + r.SetOp(ir.ODOTTYPE2) + default: + break assignOK + } + checkLHS(0, r.Type()) + checkLHS(1, types.UntypedBool) + return + } + + if len(lhs) != cr { + for i := range lhs { + checkLHS(i, nil) + } + return + } + + // x,y,z = f() + if cr > len(rhs) { + stmt := stmt.(*ir.AssignListStmt) + stmt.SetOp(ir.OAS2FUNC) + r := rhs[0].(*ir.CallExpr) + r.Use = ir.CallUseList + rtyp := r.Type() + + for i := range lhs { + checkLHS(i, rtyp.Field(i).Type) + } + return + } + + for i, r := range rhs { + checkLHS(i, r.Type()) + if lhs[i].Type() != nil { + rhs[i] = assignconvfn(r, lhs[i].Type()) + } + } +} + +// Corresponds to typecheck.typecheckargs. +func transformArgs(n ir.InitNode) { + var list []ir.Node + switch n := n.(type) { + default: + base.Fatalf("typecheckargs %+v", n.Op()) + case *ir.CallExpr: + list = n.Args + if n.IsDDD { + return + } + case *ir.ReturnStmt: + list = n.Results + } + if len(list) != 1 { + return + } + + t := list[0].Type() + if t == nil || !t.IsFuncArgStruct() { + return + } + + // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...). + + // Save n as n.Orig for fmt.go. + if ir.Orig(n) == n { + n.(ir.OrigNode).SetOrig(ir.SepCopy(n)) + } + + as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) + as.Rhs.Append(list...) + + // If we're outside of function context, then this call will + // be executed during the generated init function. However, + // init.go hasn't yet created it. Instead, associate the + // temporary variables with InitTodoFunc for now, and init.go + // will reassociate them later when it's appropriate. 
+ static := ir.CurFunc == nil + if static { + ir.CurFunc = typecheck.InitTodoFunc + } + list = nil + for _, f := range t.FieldSlice() { + t := typecheck.Temp(f.Type) + as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t)) + as.Lhs.Append(t) + list = append(list, t) + } + if static { + ir.CurFunc = nil + } + + switch n := n.(type) { + case *ir.CallExpr: + n.Args = list + case *ir.ReturnStmt: + n.Results = list + } + + transformAssign(as, as.Lhs, as.Rhs) + as.SetTypecheck(1) + n.PtrInit().Append(as) +} + +// assignconvfn converts node n for assignment to type t. Corresponds to +// typecheck.assignconvfn. +func assignconvfn(n ir.Node, t *types.Type) ir.Node { + if t.Kind() == types.TBLANK { + return n + } + + if types.Identical(n.Type(), t) { + return n + } + + op, _ := typecheck.Assignop(n.Type(), t) + + r := ir.NewConvExpr(base.Pos, op, t, n) + r.SetTypecheck(1) + r.SetImplicit(true) + return r +} + +// Corresponds to typecheck.typecheckaste. +func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes) { + var t *types.Type + var i int + + lno := base.Pos + defer func() { base.Pos = lno }() + + var n ir.Node + if len(nl) == 1 { + n = nl[0] + } + + i = 0 + for _, tl := range tstruct.Fields().Slice() { + t = tl.Type + if tl.IsDDD() { + if isddd { + n = nl[i] + ir.SetPos(n) + if n.Type() != nil { + nl[i] = assignconvfn(n, t) + } + return + } + + // TODO(mdempsky): Make into ... call with implicit slice. + for ; i < len(nl); i++ { + n = nl[i] + ir.SetPos(n) + if n.Type() != nil { + nl[i] = assignconvfn(n, t.Elem()) + } + } + return + } + + n = nl[i] + ir.SetPos(n) + if n.Type() != nil { + nl[i] = assignconvfn(n, t) + } + i++ + } +} + +// transformSend transforms a send statement, converting the value to appropriate +// type for the channel, as needed. Corresponds of typecheck.tcSend. +func transformSend(n *ir.SendStmt) { + n.Value = assignconvfn(n.Value, n.Chan.Type().Elem()) +} + +// transformReturn transforms a return node, by doing the needed assignments and +// any necessary conversions. Corresponds to typecheck.tcReturn() +func transformReturn(rs *ir.ReturnStmt) { + transformArgs(rs) + nl := rs.Results + if ir.HasNamedResults(ir.CurFunc) && len(nl) == 0 { + return + } + + typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), nl) +} + +// transformSelect transforms a select node, creating an assignment list as needed +// for each case. Corresponds to typecheck.tcSelect(). +func transformSelect(sel *ir.SelectStmt) { + for _, ncase := range sel.Cases { + if ncase.Comm != nil { + n := ncase.Comm + oselrecv2 := func(dst, recv ir.Node, def bool) { + n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv}) + n.Def = def + n.SetTypecheck(1) + ncase.Comm = n + } + switch n.Op() { + case ir.OAS: + // convert x = <-c into x, _ = <-c + // remove implicit conversions; the eventual assignment + // will reintroduce them. + n := n.(*ir.AssignStmt) + if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE { + r := r.(*ir.ConvExpr) + if r.Implicit() { + n.Y = r.X + } + } + oselrecv2(n.X, n.Y, n.Def) + + case ir.OAS2RECV: + n := n.(*ir.AssignListStmt) + n.SetOp(ir.OSELRECV2) + + case ir.ORECV: + // convert <-c into _, _ = <-c + n := n.(*ir.UnaryExpr) + oselrecv2(ir.BlankNode, n, false) + + case ir.OSEND: + break + } + } + } +} + +// transformAsOp transforms an AssignOp statement. Corresponds to OASOP case in +// typecheck1. 
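Stepping back to transformArgs above, the f(g()) rewrite it performs corresponds to this source pattern; the names g and f are illustrative:

```go
// A single multi-value call supplying all the arguments is split into
// temporaries first: f(g()) becomes t1, t2 := g(); f(t1, t2).
func g() (int, string)  { return 1, "x" }
func f(i int, s string) {}

func use() {
	f(g())
}
```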
+func transformAsOp(n *ir.AssignOpStmt) { + transformCheckAssign(n, n.X) +} + +// transformDot transforms an OXDOT (or ODOT) or ODOT, ODOTPTR, ODOTMETH, +// ODOTINTER, or OCALLPART, as appropriate. It adds in extra nodes as needed to +// access embedded fields. Corresponds to typecheck.tcDot. +func transformDot(n *ir.SelectorExpr, isCall bool) ir.Node { + assert(n.Type() != nil && n.Typecheck() == 1) + if n.Op() == ir.OXDOT { + n = typecheck.AddImplicitDots(n) + n.SetOp(ir.ODOT) + } + + t := n.X.Type() + + if n.X.Op() == ir.OTYPE { + return transformMethodExpr(n) + } + + if t.IsPtr() && !t.Elem().IsInterface() { + t = t.Elem() + n.SetOp(ir.ODOTPTR) + } + + f := typecheck.Lookdot(n, t, 0) + assert(f != nil) + + if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && !isCall { + n.SetOp(ir.OCALLPART) + n.SetType(typecheck.MethodValueWrapper(n).Type()) + } + return n +} + +// Corresponds to typecheck.typecheckMethodExpr. +func transformMethodExpr(n *ir.SelectorExpr) (res ir.Node) { + t := n.X.Type() + + // Compute the method set for t. + var ms *types.Fields + if t.IsInterface() { + ms = t.AllMethods() + } else { + mt := types.ReceiverBaseType(t) + typecheck.CalcMethods(mt) + ms = mt.AllMethods() + + // The method expression T.m requires a wrapper when T + // is different from m's declared receiver type. We + // normally generate these wrappers while writing out + // runtime type descriptors, which is always done for + // types declared at package scope. However, we need + // to make sure to generate wrappers for anonymous + // receiver types too. + if mt.Sym() == nil { + typecheck.NeedRuntimeType(t) + } + } + + s := n.Sel + m := typecheck.Lookdot1(n, s, t, ms, 0) + assert(m != nil) + + n.SetOp(ir.OMETHEXPR) + n.Selection = m + n.SetType(typecheck.NewMethodType(m.Type, n.X.Type())) + return n +} + +// Corresponds to typecheck.tcAppend. +func transformAppend(n *ir.CallExpr) ir.Node { + transformArgs(n) + args := n.Args + t := args[0].Type() + assert(t.IsSlice()) + + if n.IsDDD { + if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() { + return n + } + + args[1] = assignconvfn(args[1], t.Underlying()) + return n + } + + as := args[1:] + for i, n := range as { + assert(n.Type() != nil) + as[i] = assignconvfn(n, t.Elem()) + } + return n +} + +// Corresponds to typecheck.tcComplex. +func transformComplex(n *ir.BinaryExpr) ir.Node { + l := n.X + r := n.Y + + assert(types.Identical(l.Type(), r.Type())) + + var t *types.Type + switch l.Type().Kind() { + case types.TFLOAT32: + t = types.Types[types.TCOMPLEX64] + case types.TFLOAT64: + t = types.Types[types.TCOMPLEX128] + default: + panic(fmt.Sprintf("transformComplex: unexpected type %v", l.Type())) + } + + // Must set the type here for generics, because this can't be determined + // by substitution of the generic types. + typed(t, n) + return n +} + +// Corresponds to typecheck.tcDelete. +func transformDelete(n *ir.CallExpr) ir.Node { + transformArgs(n) + args := n.Args + assert(len(args) == 2) + + l := args[0] + r := args[1] + + args[1] = assignconvfn(r, l.Type().Key()) + return n +} + +// Corresponds to typecheck.tcMake. 
+func transformMake(n *ir.CallExpr) ir.Node { + args := n.Args + + n.Args = nil + l := args[0] + t := l.Type() + assert(t != nil) + + i := 1 + var nn ir.Node + switch t.Kind() { + case types.TSLICE: + l = args[i] + i++ + var r ir.Node + if i < len(args) { + r = args[i] + i++ + } + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r) + + case types.TMAP: + if i < len(args) { + l = args[i] + i++ + } else { + l = ir.NewInt(0) + } + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil) + nn.SetEsc(n.Esc()) + + case types.TCHAN: + l = nil + if i < len(args) { + l = args[i] + i++ + } else { + l = ir.NewInt(0) + } + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil) + default: + panic(fmt.Sprintf("transformMake: unexpected type %v", t)) + } + + assert(i == len(args)) + typed(n.Type(), nn) + return nn +} + +// Corresponds to typecheck.tcPanic. +func transformPanic(n *ir.UnaryExpr) ir.Node { + n.X = assignconvfn(n.X, types.Types[types.TINTER]) + return n +} + +// Corresponds to typecheck.tcPrint. +func transformPrint(n *ir.CallExpr) ir.Node { + transformArgs(n) + return n +} + +// Corresponds to typecheck.tcRealImag. +func transformRealImag(n *ir.UnaryExpr) ir.Node { + l := n.X + var t *types.Type + + // Determine result type. + switch l.Type().Kind() { + case types.TCOMPLEX64: + t = types.Types[types.TFLOAT32] + case types.TCOMPLEX128: + t = types.Types[types.TFLOAT64] + default: + panic(fmt.Sprintf("transformRealImag: unexpected type %v", l.Type())) + } + + // Must set the type here for generics, because this can't be determined + // by substitution of the generic types. + typed(t, n) + return n +} + +// Corresponds to typecheck.tcLenCap. +func transformLenCap(n *ir.UnaryExpr) ir.Node { + n.X = implicitstar(n.X) + return n +} + +// Corresponds to Builtin part of tcCall. 
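Before the transformBuiltin implementation below, an illustrative sample of the builtin classes it dispatches on when the operands mention a type parameter:

```go
// Builtins over type-parameter operands; the comments name the ops and
// helpers from this file that handle each one.
func builtins[T any](s []T, m map[int]T, x T) {
	_ = make([]T, 0, len(s)) // OMAKE -> transformMake (OMAKESLICE)
	_ = append(s, x)         // OAPPEND: element conversion needs []T
	_ = cap(s)               // OCAP via transformLenCap
	delete(m, 0)             // ODELETE: key converted to the map key type
	panic(x)                 // OPANIC: operand converted to interface{}
}
```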
+func transformBuiltin(n *ir.CallExpr) ir.Node { + // n.Type() can be nil for builtins with no return value + assert(n.Typecheck() == 1) + fun := n.X.(*ir.Name) + op := fun.BuiltinOp + + switch op { + case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER: + n.SetOp(op) + n.X = nil + switch op { + case ir.OAPPEND: + return transformAppend(n) + case ir.ODELETE: + return transformDelete(n) + case ir.OMAKE: + return transformMake(n) + case ir.OPRINT, ir.OPRINTN: + return transformPrint(n) + case ir.ORECOVER: + // nothing more to do + return n + } + + case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL: + transformArgs(n) + fallthrough + + case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + u := ir.NewUnaryExpr(n.Pos(), op, n.Args[0]) + u1 := typed(n.Type(), ir.InitExpr(n.Init(), u)) // typecheckargs can add to old.Init + switch op { + case ir.OCAP, ir.OLEN: + return transformLenCap(u1.(*ir.UnaryExpr)) + case ir.OREAL, ir.OIMAG: + return transformRealImag(u1.(*ir.UnaryExpr)) + case ir.OPANIC: + return transformPanic(u1.(*ir.UnaryExpr)) + case ir.OCLOSE, ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF: + // nothing more to do + return u1 + } + + case ir.OCOMPLEX, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE: + transformArgs(n) + b := ir.NewBinaryExpr(n.Pos(), op, n.Args[0], n.Args[1]) + n1 := typed(n.Type(), ir.InitExpr(n.Init(), b)) + if op != ir.OCOMPLEX { + // nothing more to do + return n1 + } + return transformComplex(n1.(*ir.BinaryExpr)) + + default: + panic(fmt.Sprintf("transformBuiltin: unexpected op %v", op)) + } + + return n +} + +func hasKeys(l ir.Nodes) bool { + for _, n := range l { + if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY { + return true + } + } + return false +} + +// transformArrayLit runs assignconvfn on each array element and returns the +// length of the slice/array that is needed to hold all the array keys/indexes +// (one more than the highest index). Corresponds to typecheck.typecheckarraylit. +func transformArrayLit(elemType *types.Type, bound int64, elts []ir.Node) int64 { + var key, length int64 + for i, elt := range elts { + ir.SetPos(elt) + r := elts[i] + var kv *ir.KeyExpr + if elt.Op() == ir.OKEY { + elt := elt.(*ir.KeyExpr) + key = typecheck.IndexConst(elt.Key) + assert(key >= 0) + kv = elt + r = elt.Value + } + + r = assignconvfn(r, elemType) + if kv != nil { + kv.Value = r + } else { + elts[i] = r + } + + key++ + if key > length { + length = key + } + } + + return length +} + +// transformCompLit transforms n to an OARRAYLIT, OSLICELIT, OMAPLIT, or +// OSTRUCTLIT node, with any needed conversions. Corresponds to +// typecheck.tcCompLit. +func transformCompLit(n *ir.CompLitExpr) (res ir.Node) { + assert(n.Type() != nil && n.Typecheck() == 1) + lno := base.Pos + defer func() { + base.Pos = lno + }() + + // Save original node (including n.Right) + n.SetOrig(ir.Copy(n)) + + ir.SetPos(n) + + t := n.Type() + + switch t.Kind() { + default: + base.Fatalf("transformCompLit %v", t.Kind()) + + case types.TARRAY: + transformArrayLit(t.Elem(), t.NumElem(), n.List) + n.SetOp(ir.OARRAYLIT) + + case types.TSLICE: + length := transformArrayLit(t.Elem(), -1, n.List) + n.SetOp(ir.OSLICELIT) + n.Len = length + + case types.TMAP: + for _, l := range n.List { + ir.SetPos(l) + assert(l.Op() == ir.OKEY) + l := l.(*ir.KeyExpr) + + r := l.Key + l.Key = assignconvfn(r, t.Key()) + + r = l.Value + l.Value = assignconvfn(r, t.Elem()) + } + + n.SetOp(ir.OMAPLIT) + + case types.TSTRUCT: + // Need valid field offsets for Xoffset below. 
+ types.CalcSize(t) + + if len(n.List) != 0 && !hasKeys(n.List) { + // simple list of values + ls := n.List + for i, n1 := range ls { + ir.SetPos(n1) + + f := t.Field(i) + n1 = assignconvfn(n1, f.Type) + sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1) + sk.Offset = f.Offset + ls[i] = sk + } + assert(len(ls) >= t.NumFields()) + } else { + // keyed list + ls := n.List + for i, l := range ls { + ir.SetPos(l) + + if l.Op() == ir.OKEY { + kv := l.(*ir.KeyExpr) + key := kv.Key + + // Sym might have resolved to name in other top-level + // package, because of import dot. Redirect to correct sym + // before we do the lookup. + s := key.Sym() + if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil { + s = typecheck.Lookup(s.Name) + } + + // An OXDOT uses the Sym field to hold + // the field to the right of the dot, + // so s will be non-nil, but an OXDOT + // is never a valid struct literal key. + assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank())) + + l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value) + ls[i] = l + } + + assert(l.Op() == ir.OSTRUCTKEY) + l := l.(*ir.StructKeyExpr) + + f := typecheck.Lookdot1(nil, l.Field, t, t.Fields(), 0) + l.Offset = f.Offset + + l.Value = assignconvfn(l.Value, f.Type) + } + } + + n.SetOp(ir.OSTRUCTLIT) + } + + return n +} diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go index c23295c3a11..8680559a412 100644 --- a/src/cmd/compile/internal/noder/types.go +++ b/src/cmd/compile/internal/noder/types.go @@ -12,6 +12,7 @@ import ( "cmd/compile/internal/types" "cmd/compile/internal/types2" "cmd/internal/src" + "strings" ) func (g *irgen) pkg(pkg *types2.Package) *types.Pkg { @@ -29,22 +30,31 @@ func (g *irgen) pkg(pkg *types2.Package) *types.Pkg { // typ converts a types2.Type to a types.Type, including caching of previously // translated types. func (g *irgen) typ(typ types2.Type) *types.Type { - // Caching type mappings isn't strictly needed, because typ0 preserves - // type identity; but caching minimizes memory blow-up from mapping the - // same composite type multiple times, and also plays better with the - // current state of cmd/compile (e.g., haphazard calculation of type - // sizes). + res := g.typ1(typ) + + // Calculate the size for all concrete types seen by the frontend. The old + // typechecker calls CheckSize() a lot, and we want to eliminate calling + // it eventually, so we should do it here instead. We only call it for + // top-level types (i.e. we do it here rather in typ1), to make sure that + // recursive types have been fully constructed before we call CheckSize. + if res != nil && !res.IsUntyped() && !res.IsFuncArgStruct() && !res.HasTParam() { + types.CheckSize(res) + } + return res +} + +// typ1 is like typ, but doesn't call CheckSize, since it may have only +// constructed part of a recursive type. Should not be called from outside this +// file (g.typ is the "external" entry point). +func (g *irgen) typ1(typ types2.Type) *types.Type { + // Cache type2-to-type mappings. Important so that each defined generic + // type (instantiated or not) has a single types.Type representation. + // Also saves a lot of computation and memory by avoiding re-translating + // types2 types repeatedly. res, ok := g.typs[typ] if !ok { res = g.typ0(typ) g.typs[typ] = res - - // Ensure we calculate the size for all concrete types seen by - // the frontend. This is another heavy hammer for something that - // should really be the backend's responsibility instead. 
- if res != nil && !res.IsUntyped() && !res.IsFuncArgStruct() { - types.CheckSize(res) - } } return res } @@ -58,8 +68,15 @@ func instTypeName2(name string, targs []types2.Type) string { if i > 0 { b.WriteByte(',') } - b.WriteString(types2.TypeString(targ, - func(*types2.Package) string { return "" })) + tname := types2.TypeString(targ, + func(*types2.Package) string { return "" }) + if strings.Index(tname, ", ") >= 0 { + // types2.TypeString puts spaces after a comma in a type + // list, but we don't want spaces in our actual type names + // and method/function names derived from them. + tname = strings.Replace(tname, ", ", ",", -1) + } + b.WriteString(tname) } b.WriteByte(']') return b.String() @@ -99,27 +116,33 @@ func (g *irgen) typ0(typ types2.Type) *types.Type { // Create a forwarding type first and put it in the g.typs // map, in order to deal with recursive generic types. - ntyp := types.New(types.TFORW) + // Fully set up the extra ntyp information (Def, RParams, + // which may set HasTParam) before translating the + // underlying type itself, so we handle recursion + // correctly, including via method signatures. + ntyp := newIncompleteNamedType(g.pos(typ.Obj().Pos()), s) g.typs[typ] = ntyp - ntyp.SetUnderlying(g.typ(typ.Underlying())) - ntyp.SetSym(s) - if ntyp.HasTParam() { - // If ntyp still has type params, then we must be - // referencing something like 'value[T2]', as when - // specifying the generic receiver of a method, - // where value was defined as "type value[T any] - // ...". Save the type args, which will now be the - // new type params of the current type. - ntyp.RParams = make([]*types.Type, len(typ.TArgs())) - for i, targ := range typ.TArgs() { - ntyp.RParams[i] = g.typ(targ) - } + // If ntyp still has type params, then we must be + // referencing something like 'value[T2]', as when + // specifying the generic receiver of a method, + // where value was defined as "type value[T any] + // ...". Save the type args, which will now be the + // new type of the current type. + // + // If ntyp does not have type params, we are saving the + // concrete types used to instantiate this type. We'll use + // these when instantiating the methods of the + // instantiated type. 
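A quick check of the comma normalization that instTypeName2 performs earlier in this hunk; the type name is a made-up example, and the replacement is plain strings.Replace:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// types2.TypeString renders a type-argument list as "Pair[int, string]";
	// symbol names derived from it must drop the space after the comma.
	fmt.Println(strings.Replace("Pair[int, string]", ", ", ",", -1))
	// Output: Pair[int,string]
}
```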
+ rparams := make([]*types.Type, len(typ.TArgs())) + for i, targ := range typ.TArgs() { + rparams[i] = g.typ1(targ) } + ntyp.SetRParams(rparams) + //fmt.Printf("Saw new type %v %v\n", instName, ntyp.HasTParam()) - // Make sure instantiated type can be uniquely found from - // the sym - s.Def = ir.TypeNode(ntyp) + ntyp.SetUnderlying(g.typ1(typ.Underlying())) + g.fillinMethods(typ, ntyp) return ntyp } obj := g.obj(typ.Obj()) @@ -129,23 +152,23 @@ func (g *irgen) typ0(typ types2.Type) *types.Type { return obj.Type() case *types2.Array: - return types.NewArray(g.typ(typ.Elem()), typ.Len()) + return types.NewArray(g.typ1(typ.Elem()), typ.Len()) case *types2.Chan: - return types.NewChan(g.typ(typ.Elem()), dirs[typ.Dir()]) + return types.NewChan(g.typ1(typ.Elem()), dirs[typ.Dir()]) case *types2.Map: - return types.NewMap(g.typ(typ.Key()), g.typ(typ.Elem())) + return types.NewMap(g.typ1(typ.Key()), g.typ1(typ.Elem())) case *types2.Pointer: - return types.NewPtr(g.typ(typ.Elem())) + return types.NewPtr(g.typ1(typ.Elem())) case *types2.Signature: return g.signature(nil, typ) case *types2.Slice: - return types.NewSlice(g.typ(typ.Elem())) + return types.NewSlice(g.typ1(typ.Elem())) case *types2.Struct: fields := make([]*types.Field, typ.NumFields()) for i := range fields { v := typ.Field(i) - f := types.NewField(g.pos(v), g.selector(v), g.typ(v.Type())) + f := types.NewField(g.pos(v), g.selector(v), g.typ1(v.Type())) f.Note = typ.Tag(i) if v.Embedded() { f.Embedded = 1 @@ -156,11 +179,20 @@ func (g *irgen) typ0(typ types2.Type) *types.Type { case *types2.Interface: embeddeds := make([]*types.Field, typ.NumEmbeddeds()) + j := 0 for i := range embeddeds { // TODO(mdempsky): Get embedding position. e := typ.EmbeddedType(i) - embeddeds[i] = types.NewField(src.NoXPos, nil, g.typ(e)) + if t := types2.AsInterface(e); t != nil && t.IsComparable() { + // Ignore predefined type 'comparable', since it + // doesn't resolve and it doesn't have any + // relevant methods. + continue + } + embeddeds[j] = types.NewField(src.NoXPos, nil, g.typ1(e)) + j++ } + embeddeds = embeddeds[:j] methods := make([]*types.Field, typ.NumExplicitMethods()) for i := range methods { @@ -172,9 +204,18 @@ func (g *irgen) typ0(typ types2.Type) *types.Type { return types.NewInterface(g.tpkg(typ), append(embeddeds, methods...)) case *types2.TypeParam: - tp := types.NewTypeParam(g.tpkg(typ), g.typ(typ.Bound())) + tp := types.NewTypeParam(g.tpkg(typ)) // Save the name of the type parameter in the sym of the type. - tp.SetSym(g.sym(typ.Obj())) + // Include the types2 subscript in the sym name + sym := g.pkg(typ.Obj().Pkg()).Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" })) + tp.SetSym(sym) + // Set g.typs[typ] in case the bound methods reference typ. + g.typs[typ] = tp + + // TODO(danscales): we don't currently need to use the bounds + // anywhere, so eventually we can probably remove. + bound := g.typ1(typ.Bound()) + *tp.Methods() = *bound.Methods() return tp case *types2.Tuple: @@ -188,8 +229,6 @@ func (g *irgen) typ0(typ types2.Type) *types.Type { fields[i] = g.param(typ.At(i)) } t := types.NewStruct(types.LocalPkg, fields) - types.CheckSize(t) - // Can only set after doing the types.CheckSize() t.StructType().Funarg = types.FunargResults return t @@ -199,12 +238,77 @@ func (g *irgen) typ0(typ types2.Type) *types.Type { } } +// fillinMethods fills in the method name nodes and types for a defined type. 
This +// is needed for later typechecking when looking up methods of instantiated types, +// and for actually generating the methods for instantiated types. +func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) { + if typ.NumMethods() != 0 { + targs := make([]ir.Node, len(typ.TArgs())) + for i, targ := range typ.TArgs() { + targs[i] = ir.TypeNode(g.typ1(targ)) + } + + methods := make([]*types.Field, typ.NumMethods()) + for i := range methods { + m := typ.Method(i) + meth := g.obj(m) + recvType := types2.AsSignature(m.Type()).Recv().Type() + ptr := types2.AsPointer(recvType) + if ptr != nil { + recvType = ptr.Elem() + } + if recvType != types2.Type(typ) { + // Unfortunately, meth is the type of the method of the + // generic type, so we have to do a substitution to get + // the name/type of the method of the instantiated type, + // using m.Type().RParams() and typ.TArgs() + inst2 := instTypeName2("", typ.TArgs()) + name := meth.Sym().Name + i1 := strings.Index(name, "[") + i2 := strings.Index(name[i1:], "]") + assert(i1 >= 0 && i2 >= 0) + // Generate the name of the instantiated method. + name = name[0:i1] + inst2 + name[i1+i2+1:] + newsym := meth.Sym().Pkg.Lookup(name) + var meth2 *ir.Name + if newsym.Def != nil { + meth2 = newsym.Def.(*ir.Name) + } else { + meth2 = ir.NewNameAt(meth.Pos(), newsym) + rparams := types2.AsSignature(m.Type()).RParams() + tparams := make([]*types.Field, len(rparams)) + for i, rparam := range rparams { + tparams[i] = types.NewField(src.NoXPos, nil, g.typ1(rparam.Type())) + } + assert(len(tparams) == len(targs)) + subst := &subster{ + g: g, + tparams: tparams, + targs: targs, + } + // Do the substitution of the type + meth2.SetType(subst.typ(meth.Type())) + newsym.Def = meth2 + } + meth = meth2 + } + methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type()) + methods[i].Nname = meth + } + ntyp.Methods().Set(methods) + if !ntyp.HasTParam() { + // Generate all the methods for a new fully-instantiated type. + g.instTypeList = append(g.instTypeList, ntyp) + } + } +} + func (g *irgen) signature(recv *types.Field, sig *types2.Signature) *types.Type { tparams2 := sig.TParams() tparams := make([]*types.Field, len(tparams2)) for i := range tparams { tp := tparams2[i] - tparams[i] = types.NewField(g.pos(tp), g.sym(tp), g.typ(tp.Type())) + tparams[i] = types.NewField(g.pos(tp), g.sym(tp), g.typ1(tp.Type())) } do := func(typ *types2.Tuple) []*types.Field { @@ -224,7 +328,7 @@ func (g *irgen) signature(recv *types.Field, sig *types2.Signature) *types.Type } func (g *irgen) param(v *types2.Var) *types.Field { - return types.NewField(g.pos(v), g.sym(v), g.typ(v.Type())) + return types.NewField(g.pos(v), g.sym(v), g.typ1(v.Type())) } func (g *irgen) sym(obj types2.Object) *types.Sym { diff --git a/src/cmd/compile/internal/noder/validate.go b/src/cmd/compile/internal/noder/validate.go index f97f81d5ad8..b926222c89c 100644 --- a/src/cmd/compile/internal/noder/validate.go +++ b/src/cmd/compile/internal/noder/validate.go @@ -23,10 +23,14 @@ func (g *irgen) match(t1 *types.Type, t2 types2.Type, hasOK bool) bool { } if hasOK { - // For has-ok values, types2 represents the expression's type as - // a 2-element tuple, whereas ir just uses the first type and - // infers that the second type is boolean. - return tuple.Len() == 2 && types.Identical(t1, g.typ(tuple.At(0).Type())) + // For has-ok values, types2 represents the expression's type as a + // 2-element tuple, whereas ir just uses the first type and infers + // that the second type is boolean. 
Must match either, since we
+		// sometimes delay the transformation to the ir form.
+		if tuple.Len() == 2 && types.Identical(t1, g.typ(tuple.At(0).Type())) {
+			return true
+		}
+		return types.Identical(t1, g.typ(t2))
 	}
 
 	if t1 == nil || tuple == nil {
@@ -96,9 +100,7 @@ func (g *irgen) unsafeExpr(name string, arg syntax.Expr) int64 {
 	selection := g.info.Selections[sel]
 	typ := g.typ(g.info.Types[sel.X].Type)
-	if typ.IsPtr() {
-		typ = typ.Elem()
-	}
+	typ = deref(typ)
 
 	var offset int64
 	for _, i := range selection.Index() {
diff --git a/src/cmd/compile/internal/objw/objw.go b/src/cmd/compile/internal/objw/objw.go
index dfbcf515565..ed5ad754d9b 100644
--- a/src/cmd/compile/internal/objw/objw.go
+++ b/src/cmd/compile/internal/objw/objw.go
@@ -11,6 +11,8 @@ import (
 	"cmd/internal/obj"
 )
 
+// Uint8 writes an unsigned byte v into s at offset off,
+// and returns the next unused offset (i.e., off+1).
 func Uint8(s *obj.LSym, off int, v uint8) int {
 	return UintN(s, off, uint64(v), 1)
 }
@@ -27,6 +29,8 @@ func Uintptr(s *obj.LSym, off int, v uint64) int {
 	return UintN(s, off, v, types.PtrSize)
 }
 
+// UintN writes an unsigned integer v of size wid bytes into s at offset off,
+// and returns the next unused offset.
 func UintN(s *obj.LSym, off int, v uint64, wid int) int {
 	if off&(wid-1) != 0 {
 		base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
@@ -42,6 +46,13 @@ func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
 	return off
 }
 
+func SymPtrWeak(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+	off = int(types.Rnd(int64(off), int64(types.PtrSize)))
+	s.WriteWeakAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+	off += types.PtrSize
+	return off
+}
+
 func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int {
 	s.WriteOff(base.Ctxt, int64(off), x, 0)
 	off += 4
@@ -62,6 +73,8 @@ func Global(s *obj.LSym, width int32, flags int16) {
 	base.Ctxt.Globl(s, int64(width), int(flags))
 }
 
+// BitVec writes the contents of bv into s as a sequence of bytes
+// in little-endian order, and returns the next unused offset.
 func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int {
 	// Runtime reads the bitmaps as byte arrays. Oblige.
 	for j := 0; int32(j) < bv.N; j += 8 {
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
index c72d1aa8348..590290fa371 100644
--- a/src/cmd/compile/internal/ppc64/galign.go
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -7,12 +7,12 @@ package ppc64
 import (
 	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/ppc64"
-	"cmd/internal/objabi"
+	"internal/buildcfg"
 )
 
 func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &ppc64.Linkppc64
-	if objabi.GOARCH == "ppc64le" {
+	if buildcfg.GOARCH == "ppc64le" {
 		arch.LinkArch = &ppc64.Linkppc64le
 	}
 	arch.REGSP = ppc64.REGSP
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index c85e110ed35..11226f65a0f 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -13,7 +13,7 @@ import (
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/ppc64"
-	"cmd/internal/objabi"
+	"internal/buildcfg"
 	"math"
 	"strings"
 )
@@ -419,7 +419,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		// If it is a Compare-and-Swap-Release operation, set the EH field with
 		// the release hint.
if v.AuxInt == 0 { - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 0}) + p.SetFrom3Const(0) } // CMP reg1,reg2 p1 := s.Prog(cmp) @@ -586,7 +586,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) // clrlslwi ra,rs,mb,sh will become rlwinm ra,rs,sh,mb-sh,31-sh as described in ISA p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)} - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}) + p.SetFrom3Const(ssa.GetPPC64Shiftsh(shifts)) p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r @@ -598,7 +598,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) // clrlsldi ra,rs,mb,sh will become rldic ra,rs,sh,mb-sh p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)} - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}) + p.SetFrom3Const(ssa.GetPPC64Shiftsh(shifts)) p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r @@ -610,7 +610,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { shifts := v.AuxInt p := s.Prog(v.Op.Asm()) p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)} - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}) + p.SetFrom3Const(ssa.GetPPC64Shiftmb(shifts)) p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r @@ -653,21 +653,21 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Auxint holds encoded rotate + mask case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI: - rot, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt) + rot, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt) p := s.Prog(v.Op.Asm()) p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} p.Reg = v.Args[0].Reg() p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(rot)} - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)}) + p.SetRestArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}}) // Auxint holds mask case ssa.OpPPC64RLWNM: - _, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt) + _, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt) p := s.Prog(v.Op.Asm()) p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} p.Reg = v.Args[0].Reg() p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()} - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)}) + p.SetRestArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}}) case ssa.OpPPC64MADDLD: r := v.Reg() @@ -679,7 +679,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_REG p.From.Reg = r1 p.Reg = r2 - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r3}) + p.SetFrom3Reg(r3) p.To.Type = obj.TYPE_REG p.To.Reg = r @@ -693,7 +693,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_REG p.From.Reg = r1 p.Reg = r3 - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r2}) + p.SetFrom3Reg(r2) p.To.Type = obj.TYPE_REG p.To.Reg = r @@ -720,7 +720,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpPPC64SUBFCconst: p := s.Prog(v.Op.Asm()) - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}) + p.SetFrom3Const(v.AuxInt) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG @@ -798,42 +798,63 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Reg = v.Reg() p.To.Type = obj.TYPE_REG - case ssa.OpPPC64MOVDload: + case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload: - // MOVDload uses a DS instruction which requires the offset value of the data to be a multiple of 4. 
-		// For offsets known at compile time, a MOVDload won't be selected, but in the case of a go.string,
-		// the offset is not known until link time. If the load of a go.string uses relocation for the
-		// offset field of the instruction, and if the offset is not aligned to 4, then a link error will occur.
-		// To avoid this problem, the full address of the go.string is computed and loaded into the base register,
-		// and that base register is used for the MOVDload using a 0 offset. This problem can only occur with
-		// go.string types because other types will have proper alignment.
+		// MOVDload and MOVWload are DS form instructions that are restricted to
+		// offsets that are a multiple of 4. If the offset is not a multiple of 4,
+		// then the address of the symbol to be loaded is computed (base + offset)
+		// and used as the new base register and the offset field in the instruction
+		// can be set to zero.

-		gostring := false
-		switch n := v.Aux.(type) {
-		case *obj.LSym:
-			gostring = strings.HasPrefix(n.Name, "go.string.")

+		// This same problem can happen with gostrings since the final offset is not
+		// known yet, but could be unaligned after the relocation is resolved.
+		// So gostrings are handled the same way.
+
+		// This allows the MOVDload and MOVWload to be generated in more cases and
+		// eliminates some offset and alignment checking in the rules file.
+
+		fromAddr := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+		ssagen.AddAux(&fromAddr, v)
+
+		genAddr := false
+
+		switch fromAddr.Name {
+		case obj.NAME_EXTERN, obj.NAME_STATIC:
+			// Special case for a rule that combines the bytes of gostring.
+			// The v alignment might seem OK, but we don't want to load it
+			// using an offset because relocation comes later.
+			genAddr = strings.HasPrefix(fromAddr.Sym.Name, "go.string") || v.Type.Alignment()%4 != 0 || fromAddr.Offset%4 != 0
+		default:
+			genAddr = fromAddr.Offset%4 != 0
 		}
-		if gostring {
-			// Generate full addr of the go.string const
-			// including AuxInt
+		if genAddr {
+			// Load full address into the temp register.
 			p := s.Prog(ppc64.AMOVD)
 			p.From.Type = obj.TYPE_ADDR
 			p.From.Reg = v.Args[0].Reg()
 			ssagen.AddAux(&p.From, v)
+			// Load target using temp as base register
+			// and offset zero. Setting NAME_NONE
+			// prevents any extra offsets from being
+			// added.
 			p.To.Type = obj.TYPE_REG
-			p.To.Reg = v.Reg()
-			// Load go.string using 0 offset
-			p = s.Prog(v.Op.Asm())
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = v.Reg()
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = v.Reg()
-			break
+			p.To.Reg = ppc64.REGTMP
+			fromAddr.Reg = ppc64.REGTMP
+			// Clear the offset field and other
+			// information that might be used
+			// by the assembler to add to the
+			// final offset value.
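+			// Illustrative sketch of the rewrite (assumed names, not
+			// from the original change): a load such as
+			//	MOVD sym+6(R5), R4    // offset 6 violates the DS-form rule
+			// is emitted instead as
+			//	MOVD $sym+6(R5), R31  // materialize the full address in REGTMP (R31)
+			//	MOVD 0(R31), R4       // zero offset always satisfies DS form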
+			fromAddr.Offset = 0
+			fromAddr.Name = obj.NAME_NONE
+			fromAddr.Sym = nil
 		}
-		// Not a go.string, generate a normal load
-		fallthrough
+		p := s.Prog(v.Op.Asm())
+		p.From = fromAddr
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = v.Reg()
+		break
 
-	case ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
+	case ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
@@ -865,7 +886,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
-	case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
+	case ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = ppc64.REGZERO
@@ -873,7 +894,46 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p.To.Reg = v.Args[0].Reg()
 		ssagen.AddAux(&p.To, v)
 
-	case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
+	case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVDstorezero:
+
+		// MOVDstore and MOVDstorezero become DS form instructions that are restricted
+		// to offset values that are a multiple of 4. If the offset field is not a
+		// multiple of 4, then the full address of the store target is computed (base +
+		// offset) and used as the new base register and the offset in the instruction
+		// is set to 0.
+
+		// This allows the MOVDstore and MOVDstorezero to be generated in more cases,
+		// and prevents checking of the offset value and alignment in the rules.
+
+		toAddr := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+		ssagen.AddAux(&toAddr, v)
+
+		if toAddr.Offset%4 != 0 {
+			p := s.Prog(ppc64.AMOVD)
+			p.From.Type = obj.TYPE_ADDR
+			p.From.Reg = v.Args[0].Reg()
+			ssagen.AddAux(&p.From, v)
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = ppc64.REGTMP
+			toAddr.Reg = ppc64.REGTMP
+			// Clear the offset field and other
+			// information that might be used
+			// by the assembler to add to the
+			// final offset value.
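+			// (Same DS-form rewrite as in the MOVDload/MOVWload case
+			// above, applied to the store target; see the sketch there.)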
+ toAddr.Offset = 0 + toAddr.Name = obj.NAME_NONE + toAddr.Sym = nil + } + p := s.Prog(v.Op.Asm()) + p.To = toAddr + p.From.Type = obj.TYPE_REG + if v.Op == ssa.OpPPC64MOVDstorezero { + p.From.Reg = ppc64.REGZERO + } else { + p.From.Reg = v.Args[1].Reg() + } + + case ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[1].Reg() @@ -910,7 +970,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // AuxInt values 4,5,6 implemented with reverse operand order from 0,1,2 if v.AuxInt > 3 { p.Reg = r.Reg - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()}) + p.SetFrom3Reg(v.Args[0].Reg()) } else { p.Reg = v.Args[0].Reg() p.SetFrom3(r) @@ -1476,7 +1536,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case rem >= 8: op, size = ppc64.AMOVD, 8 case rem >= 4: - op, size = ppc64.AMOVW, 4 + op, size = ppc64.AMOVWZ, 4 case rem >= 2: op, size = ppc64.AMOVH, 2 } @@ -1743,7 +1803,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case rem >= 8: op, size = ppc64.AMOVD, 8 case rem >= 4: - op, size = ppc64.AMOVW, 4 + op, size = ppc64.AMOVWZ, 4 case rem >= 2: op, size = ppc64.AMOVH, 2 } @@ -1784,7 +1844,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { pp.To.Reg = ppc64.REG_LR // Insert a hint this is not a subroutine return. - pp.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 1}) + pp.SetFrom3Const(1) if base.Ctxt.Flag_shared { // When compiling Go into PIC, the function we just @@ -1813,7 +1873,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { s.UseArgs(16) // space used in callee args area by assembly stubs case ssa.OpPPC64LoweredNilCheck: - if objabi.GOOS == "aix" { + if buildcfg.GOOS == "aix" { // CMP Rarg0, R0 // BNE 2(PC) // STW R0, 0(R0) @@ -1867,7 +1927,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT: v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) - case ssa.OpClobber: + case ssa.OpClobber, ssa.OpClobberReg: // TODO: implement for clobberdead experiment. Nop is ok for now. default: v.Fatalf("genValue not implemented: %s", v.LongString()) diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go index faa431a9d19..d12d9ca0a7d 100644 --- a/src/cmd/compile/internal/reflectdata/alg.go +++ b/src/cmd/compile/internal/reflectdata/alg.go @@ -287,6 +287,7 @@ func hashfor(t *types.Type) ir.Node { sym = TypeSymPrefix(".hash", t) } + // TODO(austin): This creates an ir.Name with a nil Func. n := typecheck.NewName(sym) ir.MarkFunc(n) n.SetType(types.NewSignature(types.NoPkg, nil, nil, []*types.Field{ @@ -352,7 +353,7 @@ func geneq(t *types.Type) *obj.LSym { return closure } if memequalvarlen == nil { - memequalvarlen = typecheck.LookupRuntimeVar("memequal_varlen") // asm func + memequalvarlen = typecheck.LookupRuntimeFunc("memequal_varlen") } ot := 0 ot = objw.SymPtr(closure, ot, memequalvarlen, 0) @@ -775,6 +776,7 @@ func memrun(t *types.Type, start int) (size int64, next int) { func hashmem(t *types.Type) ir.Node { sym := ir.Pkgs.Runtime.Lookup("memhash") + // TODO(austin): This creates an ir.Name with a nil Func. 
n := typecheck.NewName(sym) ir.MarkFunc(n) n.SetType(types.NewSignature(types.NoPkg, nil, nil, []*types.Field{ diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 06a7f91c522..8c0e33f6df1 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -5,7 +5,9 @@ package reflectdata import ( + "encoding/binary" "fmt" + "internal/buildcfg" "os" "sort" "strings" @@ -51,6 +53,9 @@ var ( signatset = make(map[*types.Type]struct{}) signatslice []*types.Type + gcsymmu sync.Mutex // protects gcsymset and gcsymslice + gcsymset = make(map[*types.Type]struct{}) + itabs []itabEntry ptabs []*ir.Name ) @@ -364,7 +369,7 @@ func methods(t *types.Type) []*typeSig { // imethods returns the methods of the interface type t, sorted by name. func imethods(t *types.Type) []*typeSig { var methods []*typeSig - for _, f := range t.Fields().Slice() { + for _, f := range t.AllMethods().Slice() { if f.Type.Kind() != types.TFUNC || f.Sym == nil { continue } @@ -469,21 +474,25 @@ func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int { // dnameData writes the contents of a reflect.name into s at offset ot. func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int { - if len(name) > 1<<16-1 { - base.Fatalf("name too long: %s", name) + if len(name) >= 1<<29 { + base.Fatalf("name too long: %d %s...", len(name), name[:1024]) } - if len(tag) > 1<<16-1 { - base.Fatalf("tag too long: %s", tag) + if len(tag) >= 1<<29 { + base.Fatalf("tag too long: %d %s...", len(tag), tag[:1024]) } + var nameLen [binary.MaxVarintLen64]byte + nameLenLen := binary.PutUvarint(nameLen[:], uint64(len(name))) + var tagLen [binary.MaxVarintLen64]byte + tagLenLen := binary.PutUvarint(tagLen[:], uint64(len(tag))) // Encode name and tag. See reflect/type.go for details. var bits byte - l := 1 + 2 + len(name) + l := 1 + nameLenLen + len(name) if exported { bits |= 1 << 0 } if len(tag) > 0 { - l += 2 + len(tag) + l += tagLenLen + len(tag) bits |= 1 << 1 } if pkg != nil { @@ -491,14 +500,12 @@ func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported b } b := make([]byte, l) b[0] = bits - b[1] = uint8(len(name) >> 8) - b[2] = uint8(len(name)) - copy(b[3:], name) + copy(b[1:], nameLen[:nameLenLen]) + copy(b[1+nameLenLen:], name) if len(tag) > 0 { - tb := b[3+len(name):] - tb[0] = uint8(len(tag) >> 8) - tb[1] = uint8(len(tag)) - copy(tb[2:], tag) + tb := b[1+nameLenLen+len(name):] + copy(tb, tagLen[:tagLenLen]) + copy(tb[tagLenLen:], tag) } ot = int(s.WriteBytes(base.Ctxt, int64(ot), b)) @@ -594,7 +601,7 @@ func typePkg(t *types.Type) *types.Pkg { } } } - if tsym != nil && t != types.Types[t.Kind()] && t != types.ErrorType { + if tsym != nil && tsym.Pkg != types.BuiltinPkg { return tsym.Pkg } return nil @@ -693,7 +700,8 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { sptr = writeType(tptr) } - gcsym, useGCProg, ptrdata := dgcsym(t) + gcsym, useGCProg, ptrdata := dgcsym(t, true) + delete(gcsymset, t) // ../../../../reflect/type.go:/^type.rtype // actual type structure @@ -1320,6 +1328,16 @@ func WriteRuntimeTypes() { } } } + + // Emit GC data symbols. 
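+	// (Sorting by type string below is what makes the emission order
+	// deterministic, independent of map iteration order.)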
+ gcsyms := make([]typeAndStr, 0, len(gcsymset)) + for t := range gcsymset { + gcsyms = append(gcsyms, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()}) + } + sort.Sort(typesByString(gcsyms)) + for _, ts := range gcsyms { + dgcsym(ts.t, true) + } } func WriteTabs() { @@ -1338,7 +1356,7 @@ func WriteTabs() { o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash o += 4 // skip unused field for _, fn := range genfun(i.t, i.itype) { - o = objw.SymPtr(i.lsym, o, fn, 0) // method pointer for each method + o = objw.SymPtrWeak(i.lsym, o, fn, 0) // method pointer for each method } // Nothing writes static itabs, so they are read only. objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA)) @@ -1489,31 +1507,46 @@ func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // const maxPtrmaskBytes = 2048 -// dgcsym emits and returns a data symbol containing GC information for type t, -// along with a boolean reporting whether the UseGCProg bit should be set in -// the type kind, and the ptrdata field to record in the reflect type information. -func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { +// GCSym returns a data symbol containing GC information for type t, along +// with a boolean reporting whether the UseGCProg bit should be set in the +// type kind, and the ptrdata field to record in the reflect type information. +// GCSym may be called in concurrent backend, so it does not emit the symbol +// content. +func GCSym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { + // Record that we need to emit the GC symbol. + gcsymmu.Lock() + if _, ok := gcsymset[t]; !ok { + gcsymset[t] = struct{}{} + } + gcsymmu.Unlock() + + return dgcsym(t, false) +} + +// dgcsym returns a data symbol containing GC information for type t, along +// with a boolean reporting whether the UseGCProg bit should be set in the +// type kind, and the ptrdata field to record in the reflect type information. +// When write is true, it writes the symbol data. +func dgcsym(t *types.Type, write bool) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { ptrdata = types.PtrDataSize(t) if ptrdata/int64(types.PtrSize) <= maxPtrmaskBytes*8 { - lsym = dgcptrmask(t) + lsym = dgcptrmask(t, write) return } useGCProg = true - lsym, ptrdata = dgcprog(t) + lsym, ptrdata = dgcprog(t, write) return } // dgcptrmask emits and returns the symbol containing a pointer mask for type t. -func dgcptrmask(t *types.Type) *obj.LSym { +func dgcptrmask(t *types.Type, write bool) *obj.LSym { ptrmask := make([]byte, (types.PtrDataSize(t)/int64(types.PtrSize)+7)/8) fillptrmask(t, ptrmask) - p := fmt.Sprintf("gcbits.%x", ptrmask) + p := fmt.Sprintf("runtime.gcbits.%x", ptrmask) - sym := ir.Pkgs.Runtime.Lookup(p) - lsym := sym.Linksym() - if !sym.Uniq() { - sym.SetUniq(true) + lsym := base.Ctxt.Lookup(p) + if write && !lsym.OnList() { for i, x := range ptrmask { objw.Uint8(lsym, i, x) } @@ -1525,7 +1558,7 @@ func dgcptrmask(t *types.Type) *obj.LSym { // fillptrmask fills in ptrmask with 1s corresponding to the // word offsets in t that hold pointers. -// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits. +// ptrmask is assumed to fit at least types.PtrDataSize(t)/PtrSize bits. 
func fillptrmask(t *types.Type, ptrmask []byte) { for i := range ptrmask { ptrmask[i] = 0 @@ -1546,17 +1579,18 @@ func fillptrmask(t *types.Type, ptrmask []byte) { } // dgcprog emits and returns the symbol containing a GC program for type t -// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]). -// In practice, the size is typeptrdata(t) except for non-trivial arrays. +// along with the size of the data described by the program (in the range +// [types.PtrDataSize(t), t.Width]). +// In practice, the size is types.PtrDataSize(t) except for non-trivial arrays. // For non-trivial arrays, the program describes the full t.Width size. -func dgcprog(t *types.Type) (*obj.LSym, int64) { +func dgcprog(t *types.Type, write bool) (*obj.LSym, int64) { types.CalcSize(t) if t.Width == types.BADWIDTH { base.Fatalf("dgcprog: %v badwidth", t) } lsym := TypeLinksymPrefix(".gcprog", t) var p gcProg - p.init(lsym) + p.init(lsym, write) p.emit(t, 0) offset := p.w.BitIndex() * int64(types.PtrSize) p.end() @@ -1570,11 +1604,17 @@ type gcProg struct { lsym *obj.LSym symoff int w gcprog.Writer + write bool } -func (p *gcProg) init(lsym *obj.LSym) { +func (p *gcProg) init(lsym *obj.LSym, write bool) { p.lsym = lsym + p.write = write && !lsym.OnList() p.symoff = 4 // first 4 bytes hold program length + if !write { + p.w.Init(func(byte) {}) + return + } p.w.Init(p.writeByte) if base.Debug.GCProg > 0 { fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym) @@ -1588,8 +1628,12 @@ func (p *gcProg) writeByte(x byte) { func (p *gcProg) end() { p.w.End() + if !p.write { + return + } objw.Uint32(p.lsym, 0, uint32(p.symoff-4)) objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) + p.lsym.Set(obj.AttrContentAddressable, true) if base.Debug.GCProg > 0 { fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym) } @@ -1740,6 +1784,9 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym { typecheck.NewFuncParams(method.Type.Params(), true), typecheck.NewFuncParams(method.Type.Results(), false)) + // TODO(austin): SelectorExpr may have created one or more + // ir.Names for these already with a nil Func field. We should + // consolidate these and always attach a Func to the Name. fn := typecheck.DeclFunc(newnam, tfn) fn.SetDupok(true) @@ -1766,7 +1813,11 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym { // the TOC to the appropriate value for that module. But if it returns // directly to the wrapper's caller, nothing will reset it to the correct // value for that function. - if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) { + // + // Disable tailcall for RegabiArgs for now. The IR does not connect the + // arguments with the OTAILCALL node, and the arguments are not marshaled + // correctly. + if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs { // generate tail call: adjust pointer receiver and jump to embedded method. left := dot.X // skip final .M if !left.Type().IsPtr() { @@ -1828,12 +1879,16 @@ func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) { // MarkUsedIfaceMethod marks that an interface method is used in the current // function. n is OCALLINTER node. 
func MarkUsedIfaceMethod(n *ir.CallExpr) { + // skip unnamed functions (func _()) + if ir.CurFunc.LSym == nil { + return + } dot := n.X.(*ir.SelectorExpr) ityp := dot.X.Type() tsym := TypeLinksym(ityp) r := obj.Addrel(ir.CurFunc.LSym) r.Sym = tsym - // dot.Xoffset is the method index * Widthptr (the offset of code pointer + // dot.Xoffset is the method index * PtrSize (the offset of code pointer // in itab). midx := dot.Offset() / int64(types.PtrSize) r.Add = InterfaceMethodOffset(ityp, midx) diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index 70c29a4b7b3..64a9b3b33b9 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -211,9 +211,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = rd case ssa.OpRISCV64MOVDnop: - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -304,7 +301,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.OpRISCV64MOVBconst, ssa.OpRISCV64MOVHconst, ssa.OpRISCV64MOVWconst, ssa.OpRISCV64MOVDconst: + case ssa.OpRISCV64MOVDconst: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt @@ -513,6 +510,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p6 := s.Prog(obj.ANOP) p2.To.SetTarget(p6) + case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.RegTo2 = riscv.REG_ZERO + case ssa.OpRISCV64LoweredZero: mov, sz := largestMove(v.AuxInt) @@ -624,6 +629,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Sym = ir.Syms.Duffcopy p.To.Offset = v.AuxInt + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. 
+ default: v.Fatalf("Unhandled op %v", v.Op) } diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index d4c7a286e26..ddc05b36add 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -175,10 +175,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.Reg = r1 } case ssa.OpS390XRXSBG: - r1 := v.Reg() - if r1 != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } r2 := v.Args[1].Reg() i := v.Aux.(s390x.RotateParams) p := s.Prog(v.Op.Asm()) @@ -188,7 +184,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { {Type: obj.TYPE_CONST, Offset: int64(i.Amount)}, {Type: obj.TYPE_REG, Reg: r2}, }) - p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1} + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} case ssa.OpS390XRISBGZ: r1 := v.Reg() r2 := v.Args[0].Reg() @@ -233,12 +229,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.Reg = r2 } case ssa.OpS390XADDE, ssa.OpS390XSUBE: - r1 := v.Reg0() - if r1 != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } r2 := v.Args[1].Reg() - opregreg(s, v.Op.Asm(), r1, r2) + opregreg(s, v.Op.Asm(), v.Reg0(), r2) case ssa.OpS390XADDCconst: r1 := v.Reg0() r3 := v.Args[0].Reg() @@ -248,18 +240,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpS390XMULLD, ssa.OpS390XMULLW, ssa.OpS390XMULHD, ssa.OpS390XMULHDU, ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } - opregreg(s, v.Op.Asm(), r, v.Args[1].Reg()) + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) case ssa.OpS390XFSUBS, ssa.OpS390XFSUB, ssa.OpS390XFADDS, ssa.OpS390XFADD: - r := v.Reg0() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } - opregreg(s, v.Op.Asm(), r, v.Args[1].Reg()) + opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg()) case ssa.OpS390XMLGR: // MLGR Rx R3 -> R2:R3 r0 := v.Args[0].Reg() @@ -274,10 +258,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG case ssa.OpS390XFMADD, ssa.OpS390XFMADDS, ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } r1 := v.Args[1].Reg() r2 := v.Args[2].Reg() p := s.Prog(v.Op.Asm()) @@ -285,7 +265,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Reg = r1 p.Reg = r2 p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.OpS390XFIDBR: switch v.AuxInt { case 0, 1, 3, 4, 5, 6, 7: @@ -361,15 +341,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpS390XANDconst, ssa.OpS390XANDWconst, ssa.OpS390XORconst, ssa.OpS390XORWconst, ssa.OpS390XXORconst, ssa.OpS390XXORWconst: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst, ssa.OpS390XSRDconst, ssa.OpS390XSRWconst, ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst, @@ -441,16 +417,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.OpS390XANDWload, ssa.OpS390XANDload, ssa.OpS390XORWload, ssa.OpS390XORload, ssa.OpS390XXORWload, ssa.OpS390XXORload: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same 
register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[1].Reg() ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.OpS390XMOVDload, ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload, ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload, @@ -508,10 +480,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST sc := v.AuxValAndOff() - p.From.Offset = sc.Val() + p.From.Offset = sc.Val64() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - ssagen.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off64()) case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg, ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg, ssa.OpS390XLDGR, ssa.OpS390XLGDR, @@ -527,10 +499,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST sc := v.AuxValAndOff() - p.From.Offset = sc.Val() + p.From.Offset = sc.Val64() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - ssagen.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off64()) case ssa.OpCopy: if v.Type.IsMemory() { return @@ -608,17 +580,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpS390XSumBytes2, ssa.OpS390XSumBytes4, ssa.OpS390XSumBytes8: v.Fatalf("SumBytes generated %s", v.LongString()) case ssa.OpS390XLOCGR: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = int64(v.Aux.(s390x.CCMask)) p.Reg = v.Args[1].Reg() p.To.Type = obj.TYPE_REG - p.To.Reg = r - case ssa.OpS390XFSQRT: + p.To.Reg = v.Reg() + case ssa.OpS390XFSQRTS, ssa.OpS390XFSQRT: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() @@ -650,15 +618,15 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { vo := v.AuxValAndOff() p := s.Prog(s390x.AMVC) p.From.Type = obj.TYPE_CONST - p.From.Offset = vo.Val() + p.From.Offset = vo.Val64() p.SetFrom3(obj.Addr{ Type: obj.TYPE_MEM, Reg: v.Args[1].Reg(), - Offset: vo.Off(), + Offset: vo.Off64(), }) p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - p.To.Offset = vo.Off() + p.To.Offset = vo.Off64() case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4, ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4: for i := 2; i < len(v.Args)-1; i++ { @@ -876,7 +844,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { bne.To.SetTarget(cs) case ssa.OpS390XSYNC: s.Prog(s390x.ASYNC) - case ssa.OpClobber: + case ssa.OpClobber, ssa.OpClobberReg: // TODO: implement for clobberdead experiment. Nop is ok for now. 
default: v.Fatalf("genValue not implemented: %s", v.LongString()) @@ -926,7 +894,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { p.From.Type = obj.TYPE_CONST p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible p.Reg = s390x.REG_R3 - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 0}) + p.SetFrom3Const(0) if b.Succs[0].Block() != next { s.Br(s390x.ABR, b.Succs[0].Block()) } @@ -969,17 +937,17 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { p.From.Type = obj.TYPE_CONST p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible p.Reg = b.Controls[0].Reg() - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: b.Controls[1].Reg()}) + p.SetFrom3Reg(b.Controls[1].Reg()) case ssa.BlockS390XCGIJ, ssa.BlockS390XCIJ: p.From.Type = obj.TYPE_CONST p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible p.Reg = b.Controls[0].Reg() - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(int8(b.AuxInt))}) + p.SetFrom3Const(int64(int8(b.AuxInt))) case ssa.BlockS390XCLGIJ, ssa.BlockS390XCLIJ: p.From.Type = obj.TYPE_CONST p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible p.Reg = b.Controls[0].Reg() - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(uint8(b.AuxInt))}) + p.SetFrom3Const(int64(uint8(b.AuxInt))) default: b.Fatalf("branch not implemented: %s", b.LongString()) } diff --git a/src/cmd/compile/internal/ssa/README.md b/src/cmd/compile/internal/ssa/README.md index 4483c2c85f3..833bf1ddc9f 100644 --- a/src/cmd/compile/internal/ssa/README.md +++ b/src/cmd/compile/internal/ssa/README.md @@ -184,6 +184,19 @@ compile passes, making it easy to see what each pass does to a particular program. You can also click on values and blocks to highlight them, to help follow the control flow and values. +The value specified in GOSSAFUNC can also be a package-qualified function +name, e.g. + + GOSSAFUNC=blah.Foo go build + +This will match any function named "Foo" within a package whose final +suffix is "blah" (e.g. something/blah.Foo, anotherthing/extra/blah.Foo). + +If non-HTML dumps are needed, append a "+" to the GOSSAFUNC value +and dumps will be written to stdout: + + GOSSAFUNC=Bar+ go build + diff --git a/src/cmd/compile/internal/ssa/bench_test.go b/src/cmd/compile/internal/ssa/bench_test.go new file mode 100644 index 00000000000..09716675071 --- /dev/null +++ b/src/cmd/compile/internal/ssa/bench_test.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package ssa + +import ( + "math/rand" + "testing" +) + +var d int + +//go:noinline +func fn(a, b int) bool { + c := false + if a > 0 { + if b < 0 { + d = d + 1 + } + c = true + } + return c +} + +func BenchmarkPhioptPass(b *testing.B) { + for i := 0; i < b.N; i++ { + a := rand.Perm(i/10 + 10) + for i := 1; i < len(a)/2; i++ { + fn(a[i]-a[i-1], a[i+len(a)/2-2]-a[i+len(a)/2-1]) + } + } +} diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go index 937c757b215..71ca774431e 100644 --- a/src/cmd/compile/internal/ssa/block.go +++ b/src/cmd/compile/internal/ssa/block.go @@ -358,6 +358,22 @@ func (b *Block) AuxIntString() string { } } +// likelyBranch reports whether block b is the likely branch of all of its predecessors. 
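+// That is, every predecessor either has b as its only successor, or has two
+// successors and reaches b along the edge its static branch prediction favors
+// (Succs[0] under BranchLikely, Succs[1] under BranchUnlikely). A block with
+// no predecessors is not considered likely.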
+func (b *Block) likelyBranch() bool { + if len(b.Preds) == 0 { + return false + } + for _, e := range b.Preds { + p := e.b + if len(p.Succs) == 1 || len(p.Succs) == 2 && (p.Likely == BranchLikely && p.Succs[0].b == b || + p.Likely == BranchUnlikely && p.Succs[1].b == b) { + continue + } + return false + } + return true +} + func (b *Block) Logf(msg string, args ...interface{}) { b.Func.Logf(msg, args...) } func (b *Block) Log() bool { return b.Func.Log() } func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) } diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index 9e4aa6cd79f..969fd96dbf5 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -182,6 +182,12 @@ func checkFunc(f *Func) { f.Fatalf("value %v has Aux type %T, want *AuxCall", v, v.Aux) } canHaveAux = true + case auxNameOffsetInt8: + if _, ok := v.Aux.(*AuxNameOffset); !ok { + f.Fatalf("value %v has Aux type %T, want *AuxNameOffset", v, v.Aux) + } + canHaveAux = true + canHaveAuxInt = true case auxSym, auxTyp: canHaveAux = true case auxSymOff, auxSymValAndOff, auxTypSize: diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index c267274366b..cd8eba405d5 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -6,10 +6,10 @@ package ssa import ( "bytes" - "cmd/internal/objabi" "cmd/internal/src" "fmt" "hash/crc32" + "internal/buildcfg" "log" "math/rand" "os" @@ -297,6 +297,11 @@ enables time reporting for all phases -d=ssa/prove/debug=2 sets debugging level to 2 in the prove pass +Be aware that when "/debug=X" is applied to a pass, some passes +will emit debug output for all functions, and other passes will +only emit debug output for functions that match the current +GOSSAFUNC value. + Multiple flags can be passed at once, by separating them with commas. For example: @@ -454,7 +459,7 @@ var passes = [...]pass{ {name: "dse", fn: dse}, {name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops {name: "insert resched checks", fn: insertLoopReschedChecks, - disabled: objabi.Preemptibleloops_enabled == 0}, // insert resched checks in loops. + disabled: !buildcfg.Experiment.PreemptibleLoops}, // insert resched checks in loops. {name: "lower", fn: lower, required: true}, {name: "addressing modes", fn: addressingModes, required: false}, {name: "lowered deadcode for cse", fn: deadcode}, // deadcode immediately before CSE avoids CSE making dead values live again diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index c29bc8fae6f..a8393a19995 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -5,12 +5,12 @@ package ssa import ( - "cmd/compile/internal/base" + "cmd/compile/internal/abi" "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" - "cmd/internal/objabi" "cmd/internal/src" + "internal/buildcfg" ) // A Config holds readonly compilation information. 
@@ -21,30 +21,33 @@ type Config struct { PtrSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.PtrSize RegSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.RegSize Types Types - lowerBlock blockRewriter // lowering function - lowerValue valueRewriter // lowering function - splitLoad valueRewriter // function for splitting merged load ops; only used on some architectures - registers []Register // machine registers - gpRegMask regMask // general purpose integer register mask - fpRegMask regMask // floating point register mask - fp32RegMask regMask // floating point register mask - fp64RegMask regMask // floating point register mask - specialRegMask regMask // special register mask - GCRegMap []*Register // garbage collector register map, by GC register index - FPReg int8 // register number of frame pointer, -1 if not used - LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used - hasGReg bool // has hardware g register - ctxt *obj.Link // Generic arch information - optimize bool // Do optimization - noDuffDevice bool // Don't use Duff's device - useSSE bool // Use SSE for non-float operations - useAvg bool // Use optimizations that need Avg* operations - useHmul bool // Use optimizations that need Hmul* operations - SoftFloat bool // - Race bool // race detector enabled - NeedsFpScratch bool // No direct move between GP and FP register sets - BigEndian bool // - UseFMA bool // Use hardware FMA operation + lowerBlock blockRewriter // lowering function + lowerValue valueRewriter // lowering function + splitLoad valueRewriter // function for splitting merged load ops; only used on some architectures + registers []Register // machine registers + gpRegMask regMask // general purpose integer register mask + fpRegMask regMask // floating point register mask + fp32RegMask regMask // floating point register mask + fp64RegMask regMask // floating point register mask + specialRegMask regMask // special register mask + intParamRegs []int8 // register numbers of integer param (in/out) registers + floatParamRegs []int8 // register numbers of floating param (in/out) registers + ABI1 *abi.ABIConfig // "ABIInternal" under development // TODO change comment when this becomes current + ABI0 *abi.ABIConfig + GCRegMap []*Register // garbage collector register map, by GC register index + FPReg int8 // register number of frame pointer, -1 if not used + LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used + hasGReg bool // has hardware g register + ctxt *obj.Link // Generic arch information + optimize bool // Do optimization + noDuffDevice bool // Don't use Duff's device + useSSE bool // Use SSE for non-float operations + useAvg bool // Use optimizations that need Avg* operations + useHmul bool // Use optimizations that need Hmul* operations + SoftFloat bool // + Race bool // race detector enabled + BigEndian bool // + UseFMA bool // Use hardware FMA operation } type ( @@ -144,13 +147,6 @@ type Frontend interface { // Given the name for a compound type, returns the name we should use // for the parts of that compound type. 
- SplitString(LocalSlot) (LocalSlot, LocalSlot) - SplitInterface(LocalSlot) (LocalSlot, LocalSlot) - SplitSlice(LocalSlot) (LocalSlot, LocalSlot, LocalSlot) - SplitComplex(LocalSlot) (LocalSlot, LocalSlot) - SplitStruct(LocalSlot, int) LocalSlot - SplitArray(LocalSlot) LocalSlot // array must be length 1 - SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot // DerefItab dereferences an itab function @@ -196,9 +192,11 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config c.gpRegMask = gpRegMaskAMD64 c.fpRegMask = fpRegMaskAMD64 c.specialRegMask = specialRegMaskAMD64 + c.intParamRegs = paramIntRegAMD64 + c.floatParamRegs = paramFloatRegAMD64 c.FPReg = framepointerRegAMD64 c.LinkReg = linkRegAMD64 - c.hasGReg = base.Flag.ABIWrap + c.hasGReg = buildcfg.Experiment.RegabiG case "386": c.PtrSize = 4 c.RegSize = 4 @@ -233,7 +231,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config c.FPReg = framepointerRegARM64 c.LinkReg = linkRegARM64 c.hasGReg = true - c.noDuffDevice = objabi.GOOS == "darwin" || objabi.GOOS == "ios" // darwin linker cannot handle BR26 reloc with non-zero addend + c.noDuffDevice = buildcfg.GOOS == "darwin" || buildcfg.GOOS == "ios" // darwin linker cannot handle BR26 reloc with non-zero addend case "ppc64": c.BigEndian = true fallthrough @@ -327,8 +325,11 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config c.useSSE = true c.UseFMA = true + c.ABI0 = abi.NewABIConfig(0, 0, ctxt.FixedFrameSize()) + c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.FixedFrameSize()) + // On Plan 9, floating point operations are not allowed in note handler. - if objabi.GOOS == "plan9" { + if buildcfg.GOOS == "plan9" { // Don't use FMA on Plan 9 c.UseFMA = false diff --git a/src/cmd/compile/internal/ssa/copyelim.go b/src/cmd/compile/internal/ssa/copyelim.go index 5954d3bec8e..17f65127ee0 100644 --- a/src/cmd/compile/internal/ssa/copyelim.go +++ b/src/cmd/compile/internal/ssa/copyelim.go @@ -26,7 +26,7 @@ func copyelim(f *Func) { // Update named values. for _, name := range f.Names { - values := f.NamedValues[name] + values := f.NamedValues[*name] for i, v := range values { if v.Op == OpCopy { values[i] = v.Args[0] diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go index f78527410c8..ade5e0648e7 100644 --- a/src/cmd/compile/internal/ssa/cse.go +++ b/src/cmd/compile/internal/ssa/cse.go @@ -299,7 +299,7 @@ func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp { // OpSelect is a pseudo-op. We need to be more aggressive // regarding CSE to keep multiple OpSelect's of the same // argument from existing. 
- if v.Op != OpSelect0 && v.Op != OpSelect1 { + if v.Op != OpSelect0 && v.Op != OpSelect1 && v.Op != OpSelectN { if tc := v.Type.Compare(w.Type); tc != types.CMPeq { return tc } diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go index 96b552ecf3b..5d10dfe025e 100644 --- a/src/cmd/compile/internal/ssa/deadcode.go +++ b/src/cmd/compile/internal/ssa/deadcode.go @@ -223,7 +223,7 @@ func deadcode(f *Func) { for _, name := range f.Names { j := 0 s.clear() - values := f.NamedValues[name] + values := f.NamedValues[*name] for _, v := range values { if live[v.ID] && !s.contains(v.ID) { values[j] = v @@ -232,19 +232,19 @@ func deadcode(f *Func) { } } if j == 0 { - delete(f.NamedValues, name) + delete(f.NamedValues, *name) } else { f.Names[i] = name i++ for k := len(values) - 1; k >= j; k-- { values[k] = nil } - f.NamedValues[name] = values[:j] + f.NamedValues[*name] = values[:j] } } clearNames := f.Names[i:] for j := range clearNames { - clearNames[j] = LocalSlot{} + clearNames[j] = nil } f.Names = f.Names[:i] diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 31d3f62d4e7..d694133ec3b 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -201,8 +201,9 @@ func elimDeadAutosGeneric(f *Func) { panic("unhandled op with sym effect") } - if v.Uses == 0 && v.Op != OpNilCheck || len(args) == 0 { + if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 { // Nil check has no use, but we need to keep it. + // Also keep calls and values that have side effects. return } diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index 68b6ab5fe9e..a2c2a2d98e8 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/abi" "cmd/compile/internal/ir" "cmd/internal/dwarf" "cmd/internal/obj" @@ -152,6 +153,12 @@ var BlockEnd = &Value{ Aux: StringToAux("BlockEnd"), } +var FuncEnd = &Value{ + ID: -30000, + Op: OpInvalid, + Aux: StringToAux("FuncEnd"), +} + // RegisterSet is a bitmap of registers, indexed by Register.num. 
 type RegisterSet uint64
@@ -360,12 +367,12 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
 	state.slots = state.slots[:0]
 	state.vars = state.vars[:0]
 	for i, slot := range f.Names {
-		state.slots = append(state.slots, slot)
+		state.slots = append(state.slots, *slot)
 		if ir.IsSynthetic(slot.N) {
 			continue
 		}
 
-		topSlot := &slot
+		topSlot := slot
 		for topSlot.SplitOf != nil {
 			topSlot = topSlot.SplitOf
 		}
@@ -429,7 +436,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
 		if ir.IsSynthetic(slot.N) {
 			continue
 		}
-		for _, value := range f.NamedValues[slot] {
+		for _, value := range f.NamedValues[*slot] {
 			state.valueNames[value.ID] = append(state.valueNames[value.ID], SlotID(i))
 		}
 	}
@@ -901,10 +908,10 @@ func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
 			if opcodeTable[v.Op].zeroWidth {
 				if changed {
-					if v.Op == OpArg || v.Op == OpPhi || v.Op.isLoweredGetClosurePtr() {
+					if hasAnyArgOp(v) || v.Op == OpPhi || v.Op.isLoweredGetClosurePtr() {
 						// These ranges begin at true beginning of block, not after first instruction
 						if zeroWidthPending {
-							b.Func.Fatalf("Unexpected op mixed with OpArg/OpPhi/OpLoweredGetClosurePtr at beginning of block %s in %s\n%s", b, b.Func.Name, b.Func)
+							panic(fmt.Errorf("Unexpected op '%s' mixed with OpArg/OpPhi/OpLoweredGetClosurePtr at beginning of block %s in %s\n%s", v.LongString(), b, b.Func.Name, b.Func))
 						}
 						apcChangedSize = len(state.changedVars.contents())
 						continue
@@ -948,7 +955,7 @@ func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
 	// Flush any leftover entries live at the end of the last block.
 	for varID := range state.lists {
-		state.writePendingEntry(VarID(varID), state.f.Blocks[len(state.f.Blocks)-1].ID, BlockEnd.ID)
+		state.writePendingEntry(VarID(varID), state.f.Blocks[len(state.f.Blocks)-1].ID, FuncEnd.ID)
 		list := state.lists[varID]
 		if state.loggingEnabled {
 			if len(list) == 0 {
@@ -1118,8 +1125,11 @@ func (debugInfo *FuncDebug) PutLocationList(list []byte, ctxt *obj.Link, listSym
 	listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
 }
 
-// Pack a value and block ID into an address-sized uint, returning ~0 if they
-// don't fit.
+// Pack a value and block ID into an address-sized uint, returning the encoded
+// value and a boolean indicating whether the encoding succeeded. For
+// 32-bit architectures the process may fail for very large procedures
+// (the theory being that it's ok to have degraded debug quality in
+// this case).
 func encodeValue(ctxt *obj.Link, b, v ID) (uint64, bool) {
 	if ctxt.Arch.PtrSize == 8 {
 		result := uint64(b)<<32 | uint64(uint32(v))
@@ -1186,3 +1196,276 @@ func readPtr(ctxt *obj.Link, buf []byte) uint64 {
 	}
 
 }
+
+// setupLocList creates the initial portion of a location list for a
+// user variable. It emits the encoded start/end of the range and a
+// placeholder for the size. Return value is the new list plus the
+// slot in the list holding the size (to be updated later).
+func setupLocList(ctxt *obj.Link, f *Func, list []byte, st, en ID) ([]byte, int) {
+	start, startOK := encodeValue(ctxt, f.Entry.ID, st)
+	end, endOK := encodeValue(ctxt, f.Entry.ID, en)
+	if !startOK || !endOK {
+		// This could happen if someone writes a function that uses
+		// >65K values on a 32-bit platform. Hopefully a degraded debugging
+		// experience is ok in that case.
+		return nil, 0
+	}
+	list = appendPtr(ctxt, list, start)
+	list = appendPtr(ctxt, list, end)
+
+	// Where to write the length of the location description once
+	// we know how big it is.
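+	// Two bytes are reserved here; the caller patches in the final
+	// uint16 length of the location expression (via the target's
+	// ByteOrder.PutUint16) once the expression is complete.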
+ sizeIdx := len(list) + list = list[:len(list)+2] + return list, sizeIdx +} + +// locatePrologEnd walks the entry block of a function with incoming +// register arguments and locates the last instruction in the prolog +// that spills a register arg. It returns the ID of that instruction. +// Example: +// +// b1: +// v3 = ArgIntReg {p1+0} [0] : AX +// ... more arg regs .. +// v4 = ArgFloatReg {f1+0} [0] : X0 +// v52 = MOVQstore {p1} v2 v3 v1 +// ... more stores ... +// v68 = MOVSSstore {f4} v2 v67 v66 +// v38 = MOVQstoreconst {blob} [val=0,off=0] v2 v32 +// +// Important: locatePrologEnd is expected to work properly only with +// optimization turned off (e.g. "-N"). If optimization is enabled +// we can't be assured of finding all input arguments spilled in the +// entry block prolog. +func locatePrologEnd(f *Func) ID { + + // returns true if this instruction looks like it moves an ABI + // register to the stack, along with the value being stored. + isRegMoveLike := func(v *Value) (bool, ID) { + n, ok := v.Aux.(*ir.Name) + var r ID + if !ok || n.Class != ir.PPARAM { + return false, r + } + regInputs, memInputs, spInputs := 0, 0, 0 + for _, a := range v.Args { + if a.Op == OpArgIntReg || a.Op == OpArgFloatReg { + regInputs++ + r = a.ID + } else if a.Type.IsMemory() { + memInputs++ + } else if a.Op == OpSP { + spInputs++ + } else { + return false, r + } + } + return v.Type.IsMemory() && memInputs == 1 && + regInputs == 1 && spInputs == 1, r + } + + // OpArg*Reg values we've seen so far on our forward walk, + // for which we have not yet seen a corresponding spill. + regArgs := make([]ID, 0, 32) + + // removeReg tries to remove a value from regArgs, returning true + // if found and removed, or false otherwise. + removeReg := func(r ID) bool { + for i := 0; i < len(regArgs); i++ { + if regArgs[i] == r { + regArgs = append(regArgs[:i], regArgs[i+1:]...) + return true + } + } + return false + } + + // Walk forwards through the block. When we see OpArg*Reg, record + // the value it produces in the regArgs list. When we see a store that uses + // the value, remove the entry. When we hit the last store (use) + // then we've arrived at the end of the prolog. + for k, v := range f.Entry.Values { + if v.Op == OpArgIntReg || v.Op == OpArgFloatReg { + regArgs = append(regArgs, v.ID) + continue + } + if ok, r := isRegMoveLike(v); ok { + if removed := removeReg(r); removed { + if len(regArgs) == 0 { + // Found our last spill; return the value after + // it. Note that it is possible that this spill is + // the last instruction in the block. If so, then + // return the "end of block" sentinel. + if k < len(f.Entry.Values)-1 { + return f.Entry.Values[k+1].ID + } + return BlockEnd.ID + } + } + } + if v.Op.IsCall() { + // if we hit a call, we've gone too far. + return v.ID + } + } + // nothing found + return ID(-1) +} + +// isNamedRegParam returns true if the param corresponding to "p" +// is a named, non-blank input parameter assigned to one or more +// registers. +func isNamedRegParam(p abi.ABIParamAssignment) bool { + if p.Name == nil { + return false + } + n := p.Name.(*ir.Name) + if n.Sym() == nil || n.Sym().IsBlank() { + return false + } + if len(p.Registers) == 0 { + return false + } + return true +} + +// BuildFuncDebugNoOptimized constructs a FuncDebug object with +// entries corresponding to the register-resident input parameters for +// the function "f"; it is used when we are compiling without +// optimization but the register ABI is enabled.
For each reg param, +// it constructs a 2-element location list: the first element holds +// the input register, and the second element holds the stack location +// of the param (the assumption being that when optimization is off, +// each input param reg will be spilled in the prolog). +func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32) *FuncDebug { + fd := FuncDebug{} + + pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType()) + + // Look to see if we have any named register-promoted parameters. + // If there are none, bail early and let the caller sort things + // out for the remainder of the params/locals. + numRegParams := 0 + for _, inp := range pri.InParams() { + if isNamedRegParam(inp) { + numRegParams++ + } + } + if numRegParams == 0 { + return &fd + } + + state := debugState{f: f} + + if loggingEnabled { + state.logf("generating -N reg param loc lists for func %q\n", f.Name) + } + + // Allocate location lists. + fd.LocationLists = make([][]byte, numRegParams) + + // Locate the value corresponding to the last spill of + // an input register. + afterPrologVal := locatePrologEnd(f) + + // Walk the input params again and process the register-resident elements. + pidx := 0 + for _, inp := range pri.InParams() { + if !isNamedRegParam(inp) { + // will be sorted out elsewhere + continue + } + + n := inp.Name.(*ir.Name) + sl := LocalSlot{N: n, Type: inp.Type, Off: 0} + fd.Vars = append(fd.Vars, n) + fd.Slots = append(fd.Slots, sl) + slid := len(fd.VarSlots) + fd.VarSlots = append(fd.VarSlots, []SlotID{SlotID(slid)}) + + if afterPrologVal == ID(-1) { + // This can happen for degenerate functions with infinite + // loops such as that in issue 45948. In such cases, leave + // the var/slot set up for the param, but don't try to + // emit a location list. + if loggingEnabled { + state.logf("locatePrologEnd failed, skipping %v\n", n) + } + pidx++ + continue + } + + // Param is arriving in one or more registers. We need a 2-element + // location expression for it. First entry in location list + // will correspond to lifetime in input registers. + list, sizeIdx := setupLocList(ctxt, f, fd.LocationLists[pidx], + BlockStart.ID, afterPrologVal) + if list == nil { + pidx++ + continue + } + if loggingEnabled { + state.logf("param %v:\n [<entry>, %d]:\n", n, afterPrologVal) + } + rtypes, _ := inp.RegisterTypesAndOffsets() + padding := make([]uint64, 0, 32) + padding = inp.ComputePadding(padding) + for k, r := range inp.Registers { + reg := ObjRegForAbiReg(r, f.Config) + dwreg := ctxt.Arch.DWARFRegisters[reg] + if dwreg < 32 { + list = append(list, dwarf.DW_OP_reg0+byte(dwreg)) + } else { + list = append(list, dwarf.DW_OP_regx) + list = dwarf.AppendUleb128(list, uint64(dwreg)) + } + if loggingEnabled { + state.logf(" piece %d -> dwreg %d", k, dwreg) + } + if len(inp.Registers) > 1 { + list = append(list, dwarf.DW_OP_piece) + ts := rtypes[k].Width + list = dwarf.AppendUleb128(list, uint64(ts)) + if padding[k] > 0 { + if loggingEnabled { + state.logf(" [pad %d bytes]", padding[k]) + } + list = append(list, dwarf.DW_OP_piece) + list = dwarf.AppendUleb128(list, padding[k]) + } + } + if loggingEnabled { + state.logf("\n") + } + } + // fill in length of location expression element + ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2)) + + // Second entry in the location list will be the stack home + // of the param, once it has been spilled. Emit that now.
+ list, sizeIdx = setupLocList(ctxt, f, list, + afterPrologVal, FuncEnd.ID) + if list == nil { + pidx++ + continue + } + soff := stackOffset(sl) + if soff == 0 { + list = append(list, dwarf.DW_OP_call_frame_cfa) + } else { + list = append(list, dwarf.DW_OP_fbreg) + list = dwarf.AppendSleb128(list, int64(soff)) + } + if loggingEnabled { + state.logf(" [%d, <end>): stackOffset=%d\n", afterPrologVal, soff) + } + + // fill in size + ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2)) + + fd.LocationLists[pidx] = list + pidx++ + } + return &fd +} diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index ea988e44f61..753d69cebcd 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -24,7 +24,7 @@ func decomposeBuiltIn(f *Func) { } // Decompose other values - // Note: deadcode is false because we need to keep the original + // Note: Leave dead values because we need to keep the original // values around so the name component resolution below can still work. applyRewrite(f, rewriteBlockdec, rewriteValuedec, leaveDeadValues) if f.Config.RegSize == 4 { @@ -36,64 +36,65 @@ func decomposeBuiltIn(f *Func) { // accumulate new LocalSlots in newNames for addition after the iteration. This decomposition is for // builtin types with leaf components, and thus there is no need to reprocess the newly created LocalSlots. var toDelete []namedVal - var newNames []LocalSlot + var newNames []*LocalSlot for i, name := range f.Names { t := name.Type switch { case t.IsInteger() && t.Size() > f.Config.RegSize: - hiName, loName := f.fe.SplitInt64(name) - newNames = append(newNames, hiName, loName) - for j, v := range f.NamedValues[name] { + hiName, loName := f.SplitInt64(name) + newNames = maybeAppend2(f, newNames, hiName, loName) + for j, v := range f.NamedValues[*name] { if v.Op != OpInt64Make { continue } - f.NamedValues[hiName] = append(f.NamedValues[hiName], v.Args[0]) - f.NamedValues[loName] = append(f.NamedValues[loName], v.Args[1]) + f.NamedValues[*hiName] = append(f.NamedValues[*hiName], v.Args[0]) + f.NamedValues[*loName] = append(f.NamedValues[*loName], v.Args[1]) toDelete = append(toDelete, namedVal{i, j}) } case t.IsComplex(): - rName, iName := f.fe.SplitComplex(name) - newNames = append(newNames, rName, iName) - for j, v := range f.NamedValues[name] { + rName, iName := f.SplitComplex(name) + newNames = maybeAppend2(f, newNames, rName, iName) + for j, v := range f.NamedValues[*name] { if v.Op != OpComplexMake { continue } - f.NamedValues[rName] = append(f.NamedValues[rName], v.Args[0]) - f.NamedValues[iName] = append(f.NamedValues[iName], v.Args[1]) + f.NamedValues[*rName] = append(f.NamedValues[*rName], v.Args[0]) + f.NamedValues[*iName] = append(f.NamedValues[*iName], v.Args[1]) toDelete = append(toDelete, namedVal{i, j}) } case t.IsString(): - ptrName, lenName := f.fe.SplitString(name) - newNames = append(newNames, ptrName, lenName) - for j, v := range f.NamedValues[name] { + ptrName, lenName := f.SplitString(name) + newNames = maybeAppend2(f, newNames, ptrName, lenName) + for j, v := range f.NamedValues[*name] { if v.Op != OpStringMake { continue } - f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0]) - f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1]) + f.NamedValues[*ptrName] = append(f.NamedValues[*ptrName], v.Args[0]) + f.NamedValues[*lenName] = append(f.NamedValues[*lenName], v.Args[1]) toDelete = append(toDelete, namedVal{i, j}) } case t.IsSlice(): -
ptrName, lenName, capName := f.fe.SplitSlice(name) - newNames = append(newNames, ptrName, lenName, capName) - for j, v := range f.NamedValues[name] { + ptrName, lenName, capName := f.SplitSlice(name) + newNames = maybeAppend2(f, newNames, ptrName, lenName) + newNames = maybeAppend(f, newNames, capName) + for j, v := range f.NamedValues[*name] { if v.Op != OpSliceMake { continue } - f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0]) - f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1]) - f.NamedValues[capName] = append(f.NamedValues[capName], v.Args[2]) + f.NamedValues[*ptrName] = append(f.NamedValues[*ptrName], v.Args[0]) + f.NamedValues[*lenName] = append(f.NamedValues[*lenName], v.Args[1]) + f.NamedValues[*capName] = append(f.NamedValues[*capName], v.Args[2]) toDelete = append(toDelete, namedVal{i, j}) } case t.IsInterface(): - typeName, dataName := f.fe.SplitInterface(name) - newNames = append(newNames, typeName, dataName) - for j, v := range f.NamedValues[name] { + typeName, dataName := f.SplitInterface(name) + newNames = maybeAppend2(f, newNames, typeName, dataName) + for j, v := range f.NamedValues[*name] { if v.Op != OpIMake { continue } - f.NamedValues[typeName] = append(f.NamedValues[typeName], v.Args[0]) - f.NamedValues[dataName] = append(f.NamedValues[dataName], v.Args[1]) + f.NamedValues[*typeName] = append(f.NamedValues[*typeName], v.Args[0]) + f.NamedValues[*dataName] = append(f.NamedValues[*dataName], v.Args[1]) toDelete = append(toDelete, namedVal{i, j}) } case t.IsFloat(): @@ -107,6 +108,18 @@ func decomposeBuiltIn(f *Func) { f.Names = append(f.Names, newNames...) } +func maybeAppend(f *Func, ss []*LocalSlot, s *LocalSlot) []*LocalSlot { + if _, ok := f.NamedValues[*s]; !ok { + f.NamedValues[*s] = nil + return append(ss, s) + } + return ss +} + +func maybeAppend2(f *Func, ss []*LocalSlot, s1, s2 *LocalSlot) []*LocalSlot { + return maybeAppend(f, maybeAppend(f, ss, s1), s2) +} + func decomposeBuiltInPhi(v *Value) { switch { case v.Type.IsInteger() && v.Type.Size() > v.Block.Func.Config.RegSize: @@ -230,7 +243,7 @@ func decomposeUser(f *Func) { } // Split up named values into their components. i := 0 - var newNames []LocalSlot + var newNames []*LocalSlot for _, name := range f.Names { t := name.Type switch { @@ -250,7 +263,7 @@ func decomposeUser(f *Func) { // decomposeUserArrayInto creates names for the element(s) of arrays referenced // by name where possible, and appends those new names to slots, which is then // returned. -func decomposeUserArrayInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalSlot { +func decomposeUserArrayInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalSlot { t := name.Type if t.NumElem() == 0 { // TODO(khr): Not sure what to do here. Probably nothing. 
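The recurring change in these decompose.go hunks is that f.Names now holds *LocalSlot pointers while f.NamedValues stays keyed by the dereferenced LocalSlot value, so a split slot reached along two different decomposition paths lands in a single map entry and is registered in f.Names at most once (which is what maybeAppend/maybeAppend2 above enforce). A minimal, self-contained sketch of that bookkeeping, with toy types standing in for the compiler's (illustrative only, not part of this CL):

package main

import "fmt"

// Toy model: names are kept as pointers for cheap swap/compaction, while the
// value map is keyed by the dereferenced struct, so two distinct pointers
// describing the same slot share one entry.
type slot struct {
	name string
	off  int64
}

func main() {
	named := map[slot][]int{}
	var names []*slot

	maybeAppend := func(s *slot) {
		if _, ok := named[*s]; !ok {
			named[*s] = nil // reserve the key so later probes see it
			names = append(names, s)
		}
	}

	a := &slot{"x.ptr", 0}
	b := &slot{"x.ptr", 0} // a distinct pointer naming the same slot
	maybeAppend(a)
	maybeAppend(b)                      // deduplicated via the dereferenced key
	fmt.Println(len(names), len(named)) // prints: 1 1
}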
@@ -261,20 +274,20 @@ func decomposeUserArrayInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalS // shouldn't get here due to CanSSA f.Fatalf("array not of size 1") } - elemName := f.fe.SplitArray(name) + elemName := f.SplitArray(name) var keep []*Value - for _, v := range f.NamedValues[name] { + for _, v := range f.NamedValues[*name] { if v.Op != OpArrayMake1 { keep = append(keep, v) continue } - f.NamedValues[elemName] = append(f.NamedValues[elemName], v.Args[0]) + f.NamedValues[*elemName] = append(f.NamedValues[*elemName], v.Args[0]) } if len(keep) == 0 { // delete the name for the array as a whole - delete(f.NamedValues, name) + delete(f.NamedValues, *name) } else { - f.NamedValues[name] = keep + f.NamedValues[*name] = keep } if t.Elem().IsArray() { @@ -289,38 +302,38 @@ func decomposeUserArrayInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Loc // decomposeUserStructInto creates names for the field(s) of structs referenced // by name where possible, and appends those new names to slots, which is then // returned. -func decomposeUserStructInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalSlot { - fnames := []LocalSlot{} // slots for struct in name +func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalSlot { + fnames := []*LocalSlot{} // slots for struct in name t := name.Type n := t.NumFields() for i := 0; i < n; i++ { - fs := f.fe.SplitStruct(name, i) + fs := f.SplitStruct(name, i) fnames = append(fnames, fs) // arrays and structs will be decomposed further, so // there's no need to record a name if !fs.Type.IsArray() && !fs.Type.IsStruct() { - slots = append(slots, fs) + slots = maybeAppend(f, slots, fs) } } makeOp := StructMakeOp(n) var keep []*Value // create named values for each struct field - for _, v := range f.NamedValues[name] { + for _, v := range f.NamedValues[*name] { if v.Op != makeOp { keep = append(keep, v) continue } for i := 0; i < len(fnames); i++ { - f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], v.Args[i]) + f.NamedValues[*fnames[i]] = append(f.NamedValues[*fnames[i]], v.Args[i]) } } if len(keep) == 0 { // delete the name for the struct as a whole - delete(f.NamedValues, name) + delete(f.NamedValues, *name) } else { - f.NamedValues[name] = keep + f.NamedValues[*name] = keep } // now that this f.NamedValues contains values for the struct @@ -328,10 +341,10 @@ func decomposeUserStructInto(f *Func, name LocalSlot, slots []LocalSlot) []Local for i := 0; i < n; i++ { if name.Type.FieldType(i).IsStruct() { slots = decomposeUserStructInto(f, fnames[i], slots) - delete(f.NamedValues, fnames[i]) + delete(f.NamedValues, *fnames[i]) } else if name.Type.FieldType(i).IsArray() { slots = decomposeUserArrayInto(f, fnames[i], slots) - delete(f.NamedValues, fnames[i]) + delete(f.NamedValues, *fnames[i]) } } return slots @@ -416,9 +429,10 @@ type namedVal struct { locIndex, valIndex int // f.NamedValues[f.Names[locIndex]][valIndex] = key } -// deleteNamedVals removes particular values with debugger names from f's naming data structures +// deleteNamedVals removes particular values with debugger names from f's naming data structures, +// removes all values with OpInvalid, and re-sorts the list of Names. func deleteNamedVals(f *Func, toDelete []namedVal) { - // Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalid pending indices. + // Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalidate pending indices.
sort.Slice(toDelete, func(i, j int) bool { if toDelete[i].locIndex != toDelete[j].locIndex { return toDelete[i].locIndex > toDelete[j].locIndex @@ -430,16 +444,36 @@ func deleteNamedVals(f *Func, toDelete []namedVal) { // Get rid of obsolete names for _, d := range toDelete { loc := f.Names[d.locIndex] - vals := f.NamedValues[loc] + vals := f.NamedValues[*loc] l := len(vals) - 1 if l > 0 { vals[d.valIndex] = vals[l] - f.NamedValues[loc] = vals[:l] - } else { - delete(f.NamedValues, loc) - l = len(f.Names) - 1 - f.Names[d.locIndex] = f.Names[l] - f.Names = f.Names[:l] + } + vals[l] = nil + f.NamedValues[*loc] = vals[:l] + } + // Delete locations with no values attached. + end := len(f.Names) + for i := len(f.Names) - 1; i >= 0; i-- { + loc := f.Names[i] + vals := f.NamedValues[*loc] + last := len(vals) + for j := len(vals) - 1; j >= 0; j-- { + if vals[j].Op == OpInvalid { + last-- + vals[j] = vals[last] + vals[last] = nil + } + } + if last < len(vals) { + f.NamedValues[*loc] = vals[:last] + } + if len(vals) == 0 { + delete(f.NamedValues, *loc) + end-- + f.Names[i] = f.Names[end] + f.Names[end] = nil } } + f.Names = f.Names[:end] } diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index 579818e4f3c..d37d06f8e71 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -5,6 +5,9 @@ package ssa import ( + "cmd/compile/internal/abi" + "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "fmt" @@ -12,22 +15,22 @@ import ( ) type selKey struct { - from *Value - offset int64 - size int64 - typ *types.Type + from *Value // what is selected from + offsetOrIndex int64 // whatever is appropriate for the selector + size int64 + typ *types.Type } -type offsetKey struct { - from *Value - offset int64 - pt *types.Type -} +type Abi1RO uint8 // An offset within a parameter's slice of register indices, for abi1. func isBlockMultiValueExit(b *Block) bool { return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && len(b.Controls) > 0 && b.Controls[0].Op == OpMakeResult } +func badVal(s string, v *Value) error { + return fmt.Errorf("%s %s", s, v.LongString()) +} + // removeTrivialWrapperTypes unwraps layers of // struct { singleField SomeType } and [1]SomeType // until a non-wrapper type is reached. This is useful @@ -51,20 +54,145 @@ func removeTrivialWrapperTypes(t *types.Type) *types.Type { return t } +// A registerCursor tracks which register is used for an Arg or regValues, or a piece of such. +type registerCursor struct { + // TODO(register args) convert this to a generalized target cursor. + storeDest *Value // if there are no register targets, then this is the base of the store. 
+ regsLen int // the number of registers available for this Arg/result (which is all in registers or not at all) + nextSlice Abi1RO // the next register/register-slice offset + config *abi.ABIConfig + regValues *[]*Value // values assigned to registers accumulate here +} + +func (rc *registerCursor) String() string { + dest := "" + if rc.storeDest != nil { + dest = rc.storeDest.String() + } + regs := "" + if rc.regValues != nil { + regs = "" + for i, x := range *rc.regValues { + if i > 0 { + regs = regs + "; " + } + regs = regs + x.LongString() + } + } + // not printing the config because that has not been useful + return fmt.Sprintf("RCSR{storeDest=%v, regsLen=%d, nextSlice=%d, regValues=[%s]}", dest, rc.regsLen, rc.nextSlice, regs) +} + +// next effectively post-increments the register cursor; the receiver is advanced, +// the old value is returned. +func (c *registerCursor) next(t *types.Type) registerCursor { + rc := *c + if int(c.nextSlice) < c.regsLen { + w := c.config.NumParamRegs(t) + c.nextSlice += Abi1RO(w) + } + return rc +} + +// plus returns a register cursor offset from the original, without modifying the original. +func (c *registerCursor) plus(regWidth Abi1RO) registerCursor { + rc := *c + rc.nextSlice += regWidth + return rc +} + +const ( + // Register offsets for fields of built-in aggregate types; the ones not listed are zero. + RO_complex_imag = 1 + RO_string_len = 1 + RO_slice_len = 1 + RO_slice_cap = 2 + RO_iface_data = 1 +) + +func (x *expandState) regWidth(t *types.Type) Abi1RO { + return Abi1RO(x.abi1.NumParamRegs(t)) +} + +// regOffset returns the register offset of the i'th element of type t +func (x *expandState) regOffset(t *types.Type, i int) Abi1RO { + // TODO maybe cache this in a map if profiling recommends. + if i == 0 { + return 0 + } + if t.IsArray() { + return Abi1RO(i) * x.regWidth(t.Elem()) + } + if t.IsStruct() { + k := Abi1RO(0) + for j := 0; j < i; j++ { + k += x.regWidth(t.FieldType(j)) + } + return k + } + panic("Haven't implemented this case yet, do I need to?") +} + +// at returns the register cursor for component i of t, where the first +// component is numbered 0. +func (c *registerCursor) at(t *types.Type, i int) registerCursor { + rc := *c + if i == 0 || c.regsLen == 0 { + return rc + } + if t.IsArray() { + w := c.config.NumParamRegs(t.Elem()) + rc.nextSlice += Abi1RO(i * w) + return rc + } + if t.IsStruct() { + for j := 0; j < i; j++ { + rc.next(t.FieldType(j)) + } + return rc + } + panic("Haven't implemented this case yet, do I need to?") +} + +func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value) { + c.regsLen = len(regs) + c.nextSlice = 0 + if len(regs) == 0 { + c.storeDest = storeDest // only save this if there are no registers, will explode if misused. 
+ return + } + c.config = info.Config() + c.regValues = result +} + +func (c *registerCursor) addArg(v *Value) { + *c.regValues = append(*c.regValues, v) +} + +func (c *registerCursor) hasRegs() bool { + return c.regsLen > 0 +} + type expandState struct { - f *Func - debug bool - canSSAType func(*types.Type) bool - regSize int64 - sp *Value - typs *Types - ptrSize int64 - hiOffset int64 - lowOffset int64 - namedSelects map[*Value][]namedVal - sdom SparseTree - common map[selKey]*Value - offsets map[offsetKey]*Value + f *Func + abi1 *abi.ABIConfig + debug bool + canSSAType func(*types.Type) bool + regSize int64 + sp *Value + typs *Types + ptrSize int64 + hiOffset int64 + lowOffset int64 + hiRo Abi1RO + loRo Abi1RO + namedSelects map[*Value][]namedVal + sdom SparseTree + commonSelectors map[selKey]*Value // used to de-dupe selectors + commonArgs map[selKey]*Value // used to de-dupe OpArg/OpArgIntReg/OpArgFloatReg + memForCall map[ID]*Value // For a call, need to know the unique selector that gets the mem. + transformedSelects map[ID]bool // OpSelectN after rewriting, either created or renumbered. + indentLevel int // Indentation for debugging recursion } // intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target @@ -92,9 +220,16 @@ func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool { // offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP // TODO should also optimize offsets from SB? -func (x *expandState) offsetFrom(from *Value, offset int64, pt *types.Type) *Value { - if offset == 0 && from.Type == pt { // this is not actually likely - return from +func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.Type) *Value { + ft := from.Type + if offset == 0 { + if ft == pt { + return from + } + // This captures common, (apparently) safe cases. The unsafe cases involve ft == uintptr + if (ft.IsPtr() || ft.IsUnsafePtr()) && pt.IsPtr() { + return from + } } // Simplify, canonicalize for from.Op == OpOffPtr { @@ -104,25 +239,51 @@ func (x *expandState) offsetFrom(from *Value, offset int64, pt *types.Type) *Val if from == x.sp { return x.f.ConstOffPtrSP(pt, offset, x.sp) } - key := offsetKey{from, offset, pt} - v := x.offsets[key] - if v != nil { - return v - } - v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from) - x.offsets[key] = v - return v + return b.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from) } // splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates. -func (x *expandState) splitSlots(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot { - var locs []LocalSlot +func (x *expandState) splitSlots(ls []*LocalSlot, sfx string, offset int64, ty *types.Type) []*LocalSlot { + var locs []*LocalSlot for i := range ls { - locs = append(locs, x.f.fe.SplitSlot(&ls[i], sfx, offset, ty)) + locs = append(locs, x.f.SplitSlot(ls[i], sfx, offset, ty)) } return locs } +// prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg. +func (x *expandState) prAssignForArg(v *Value) *abi.ABIParamAssignment { + if v.Op != OpArg { + panic(badVal("Wanted OpArg, instead saw", v)) + } + return ParamAssignmentForArgName(x.f, v.Aux.(*ir.Name)) +} + +// ParamAssignmentForArgName returns the ABIParamAssignment for f's arg with matching name. 
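// Illustration (editor's, not from this CL): for func f(x int, y string) compiled with the register ABI on amd64, the assignment returned for x lists one integer register and the one for y lists two (pointer and length), while a stack-assigned parameter has an empty Registers slice and only a frame offset.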
+func ParamAssignmentForArgName(f *Func, name *ir.Name) *abi.ABIParamAssignment { + abiInfo := f.OwnAux.abiInfo + ip := abiInfo.InParams() + for i, a := range ip { + if a.Name == name { + return &ip[i] + } + } + panic(fmt.Errorf("Did not match param %v in prInfo %+v", name, abiInfo.InParams())) +} + +// indent increments (or decrements) the indentation. +func (x *expandState) indent(n int) { + x.indentLevel += n +} + +// Printf does an indented fmt.Printf on the format and args. +func (x *expandState) Printf(format string, a ...interface{}) (n int, err error) { + if x.indentLevel > 0 { + fmt.Printf("%[1]*s", x.indentLevel, "") + } + return fmt.Printf(format, a...) +} + // Calls that need lowering have some number of inputs, including a memory input, // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able. @@ -140,11 +301,13 @@ func (x *expandState) splitSlots(ls []LocalSlot, sfx string, offset int64, ty *t // It emits the code necessary to implement the leaf select operation that leads to the root. // // TODO when registers really arrive, must also decompose anything split across two registers or registers and memory. -func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) []LocalSlot { +func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64, regOffset Abi1RO) []*LocalSlot { if x.debug { - fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset) + x.indent(3) + defer x.indent(-3) + x.Printf("rewriteSelect(%s; %s; memOff=%d; regOff=%d)\n", leaf.LongString(), selector.LongString(), offset, regOffset) } - var locs []LocalSlot + var locs []*LocalSlot leafType := leaf.Type if len(selector.Args) > 0 { w := selector.Args[0] @@ -156,15 +319,24 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) } } switch selector.Op { + case OpArgIntReg, OpArgFloatReg: + if leafType == selector.Type { // OpIData leads us here, sometimes. + leaf.copyOf(selector) + } else { + x.f.Fatalf("Unexpected %s type, selector=%s, leaf=%s\n", selector.Op.String(), selector.LongString(), leaf.LongString()) + } + if x.debug { + x.Printf("---%s, break\n", selector.Op.String()) + } case OpArg: if !x.isAlreadyExpandedAggregateType(selector.Type) { if leafType == selector.Type { // OpIData leads us here, sometimes.
- leaf.copyOf(selector) + x.newArgToMemOrRegs(selector, leaf, offset, regOffset, leafType, leaf.Pos) } else { x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString()) } if x.debug { - fmt.Printf("\tOpArg, break\n") + x.Printf("---OpArg, break\n") } break } @@ -172,20 +344,8 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) case OpIData, OpStructSelect, OpArraySelect: leafType = removeTrivialWrapperTypes(leaf.Type) } - aux := selector.Aux - auxInt := selector.AuxInt + offset - if leaf.Block == selector.Block { - leaf.reset(OpArg) - leaf.Aux = aux - leaf.AuxInt = auxInt - leaf.Type = leafType - } else { - w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux) - leaf.copyOf(w) - if x.debug { - fmt.Printf("\tnew %s\n", w.LongString()) - } - } + x.newArgToMemOrRegs(selector, leaf, offset, regOffset, leafType, leaf.Pos) + for _, s := range x.namedSelects[selector] { locs = append(locs, x.f.Names[s.locIndex]) } @@ -228,28 +388,83 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) } case OpSelectN: + // TODO(register args) result case + // if applied to Op-mumble-call, the Aux tells us which result, regOffset specifies offset within result. If a register, should rewrite to OpSelectN for new call. // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there. call := selector.Args[0] + call0 := call aux := call.Aux.(*AuxCall) which := selector.AuxInt + if x.transformedSelects[selector.ID] { + // This is a minor hack. Either this select has had its operand adjusted (mem) or + // it is some other intermediate node that was rewritten to reference a register (not a generic arg). + // This can occur with chains of selection/indexing from single field/element aggregates. + leaf.copyOf(selector) + break + } if which == aux.NResults() { // mem is after the results. // rewrite v as a Copy of call -- the replacement call will produce a mem. - leaf.copyOf(call) + if leaf != selector { + panic(fmt.Errorf("Unexpected selector of memory, selector=%s, call=%s, leaf=%s", selector.LongString(), call.LongString(), leaf.LongString())) + } + if aux.abiInfo == nil { + panic(badVal("aux.abiInfo nil for call", call)) + } + if existing := x.memForCall[call.ID]; existing == nil { + selector.AuxInt = int64(aux.abiInfo.OutRegistersUsed()) + x.memForCall[call.ID] = selector + x.transformedSelects[selector.ID] = true // operand adjusted + } else { + selector.copyOf(existing) + } + } else { leafType := removeTrivialWrapperTypes(leaf.Type) if x.canSSAType(leafType) { pt := types.NewPtr(leafType) - off := x.offsetFrom(x.sp, offset+aux.OffsetOfResult(which), pt) // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input. - if leaf.Block == call.Block { - leaf.reset(OpLoad) - leaf.SetArgs2(off, call) - leaf.Type = leafType + // Create a "mem" for any loads that need to occur. 
+ if mem := x.memForCall[call.ID]; mem != nil { + if mem.Block != call.Block { + panic(fmt.Errorf("selector and call need to be in same block, selector=%s; call=%s", selector.LongString(), call.LongString())) + } + call = mem } else { - w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call) - leaf.copyOf(w) - if x.debug { - fmt.Printf("\tnew %s\n", w.LongString()) + mem = call.Block.NewValue1I(call.Pos.WithNotStmt(), OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call) + x.transformedSelects[mem.ID] = true // select uses post-expansion indexing + x.memForCall[call.ID] = mem + call = mem + } + outParam := aux.abiInfo.OutParam(int(which)) + if len(outParam.Registers) > 0 { + firstReg := uint32(0) + for i := 0; i < int(which); i++ { + firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers)) + } + reg := int64(regOffset + Abi1RO(firstReg)) + if leaf.Block == call.Block { + leaf.reset(OpSelectN) + leaf.SetArgs1(call0) + leaf.Type = leafType + leaf.AuxInt = reg + x.transformedSelects[leaf.ID] = true // leaf, rewritten to use post-expansion indexing. + } else { + w := call.Block.NewValue1I(leaf.Pos, OpSelectN, leafType, reg, call0) + x.transformedSelects[w.ID] = true // select, using post-expansion indexing. + leaf.copyOf(w) + } + } else { + off := x.offsetFrom(x.f.Entry, x.sp, offset+aux.OffsetOfResult(which), pt) + if leaf.Block == call.Block { + leaf.reset(OpLoad) + leaf.SetArgs2(off, call) + leaf.Type = leafType + } else { + w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call) + leaf.copyOf(w) + if x.debug { + x.Printf("---new %s\n", w.LongString()) + } } } for _, s := range x.namedSelects[selector] { @@ -262,44 +477,46 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) case OpStructSelect: w := selector.Args[0] - var ls []LocalSlot + var ls []*LocalSlot if w.Type.Kind() != types.TSTRUCT { // IData artifact - ls = x.rewriteSelect(leaf, w, offset) + ls = x.rewriteSelect(leaf, w, offset, regOffset) } else { - ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt))) + fldi := int(selector.AuxInt) + ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(fldi), regOffset+x.regOffset(w.Type, fldi)) if w.Op != OpIData { for _, l := range ls { - locs = append(locs, x.f.fe.SplitStruct(l, int(selector.AuxInt))) + locs = append(locs, x.f.SplitStruct(l, int(selector.AuxInt))) } } } case OpArraySelect: w := selector.Args[0] - x.rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt) + index := selector.AuxInt + x.rewriteSelect(leaf, w, offset+selector.Type.Size()*index, regOffset+x.regOffset(w.Type, int(index))) case OpInt64Hi: w := selector.Args[0] - ls := x.rewriteSelect(leaf, w, offset+x.hiOffset) + ls := x.rewriteSelect(leaf, w, offset+x.hiOffset, regOffset+x.hiRo) locs = x.splitSlots(ls, ".hi", x.hiOffset, leafType) case OpInt64Lo: w := selector.Args[0] - ls := x.rewriteSelect(leaf, w, offset+x.lowOffset) + ls := x.rewriteSelect(leaf, w, offset+x.lowOffset, regOffset+x.loRo) locs = x.splitSlots(ls, ".lo", x.lowOffset, leafType) case OpStringPtr: - ls := x.rewriteSelect(leaf, selector.Args[0], offset) + ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset) locs = x.splitSlots(ls, ".ptr", 0, x.typs.BytePtr) - case OpSlicePtr: + case OpSlicePtr, OpSlicePtrUnchecked: w := selector.Args[0] - ls := x.rewriteSelect(leaf, w, offset) + ls := x.rewriteSelect(leaf, w, offset, regOffset) locs = x.splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem())) case OpITab: w := selector.Args[0] - ls := 
x.rewriteSelect(leaf, w, offset) + ls := x.rewriteSelect(leaf, w, offset, regOffset) sfx := ".itab" if w.Type.IsEmptyInterface() { sfx = ".type" @@ -307,27 +524,27 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) locs = x.splitSlots(ls, sfx, 0, x.typs.Uintptr) case OpComplexReal: - ls := x.rewriteSelect(leaf, selector.Args[0], offset) + ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset) locs = x.splitSlots(ls, ".real", 0, leafType) case OpComplexImag: - ls := x.rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part. + ls := x.rewriteSelect(leaf, selector.Args[0], offset+leafType.Width, regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part. locs = x.splitSlots(ls, ".imag", leafType.Width, leafType) case OpStringLen, OpSliceLen: - ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize) + ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_slice_len) locs = x.splitSlots(ls, ".len", x.ptrSize, leafType) case OpIData: - ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize) + ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_iface_data) locs = x.splitSlots(ls, ".data", x.ptrSize, leafType) case OpSliceCap: - ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize) + ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize, regOffset+RO_slice_cap) locs = x.splitSlots(ls, ".cap", 2*x.ptrSize, leafType) case OpCopy: // If it's an intermediate result, recurse - locs = x.rewriteSelect(leaf, selector.Args[0], offset) + locs = x.rewriteSelect(leaf, selector.Args[0], offset, regOffset) for _, s := range x.namedSelects[selector] { // this copy may have had its own name, preserve that, too. locs = append(locs, x.f.Names[s.locIndex]) @@ -342,7 +559,7 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value { source := a.Args[0] - dst := x.offsetFrom(base, offset, source.Type) + dst := x.offsetFrom(b, base, offset, source.Type) if a.Uses == 1 && a.Block == b { a.reset(OpMove) a.Pos = pos @@ -358,26 +575,157 @@ func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, return mem } -// decomposeArgOrLoad is a helper for storeArgOrLoad. -// It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions -// passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead. -func (x *expandState) decomposeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64, - decomposeOne func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value, - decomposeTwo func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value { +var indexNames [1]string = [1]string{"[0]"} + +// pathTo returns the selection path to the leaf type at offset within container. +// e.g. len(thing.field[0]) => ".field[0].len" +// this is for purposes of generating names ultimately fed to a debugger. 
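// Worked examples (editor's illustration, 64-bit layout assumed): with container = struct { a [1]complex64; s string }, pathTo(container, float32, 4) walks into field a (offset 0), element [0], and the imaginary half at +4, returning ".a[0].imag"; pathTo(container, int, 16) lands in field s (offset 8) at inner offset 8, the length word, returning ".s.len".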
+func (x *expandState) pathTo(container, leaf *types.Type, offset int64) string { + if container == leaf || offset == 0 && container.Size() == leaf.Size() { + return "" + } + path := "" +outer: + for { + switch container.Kind() { + case types.TARRAY: + container = container.Elem() + if container.Size() == 0 { + return path + } + i := offset / container.Size() + offset = offset % container.Size() + // If a future compiler/ABI supports larger SSA/Arg-able arrays, expand indexNames. + path = path + indexNames[i] + continue + case types.TSTRUCT: + for i := 0; i < container.NumFields(); i++ { + fld := container.Field(i) + if fld.Offset+fld.Type.Size() > offset { + offset -= fld.Offset + path += "." + fld.Sym.Name + container = fld.Type + continue outer + } + } + return path + case types.TINT64, types.TUINT64: + if container.Width == x.regSize { + return path + } + if offset == x.hiOffset { + return path + ".hi" + } + return path + ".lo" + case types.TINTER: + if offset != 0 { + return path + ".data" + } + if container.IsEmptyInterface() { + return path + ".type" + } + return path + ".itab" + + case types.TSLICE: + if offset == 2*x.regSize { + return path + ".cap" + } + fallthrough + case types.TSTRING: + if offset == 0 { + return path + ".ptr" + } + return path + ".len" + case types.TCOMPLEX64, types.TCOMPLEX128: + if offset == 0 { + return path + ".real" + } + return path + ".imag" + } + return path + } +} + +// decomposeArg is a helper for storeArgOrLoad. +// It decomposes an Arg into smaller parts and returns the new mem. +// If the type does not match one of the expected aggregate types, it returns nil instead. +// Parameters: +// pos -- the location of any generated code. +// b -- the block into which any generated code should normally be placed +// source -- the value, possibly an aggregate, to be stored. +// mem -- the mem flowing into this decomposition (loads depend on it, stores update it) +// t -- the type of the value to be stored +// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset +// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg. +// storeRc -- storeRC; if the value is stored in registers, this specifies the registers. +// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation. +func (x *expandState) decomposeArg(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value { + + pa := x.prAssignForArg(source) + var locs []*LocalSlot + for _, s := range x.namedSelects[source] { + locs = append(locs, x.f.Names[s.locIndex]) + } + + if len(pa.Registers) > 0 { + // Handle the in-registers case directly + rts, offs := pa.RegisterTypesAndOffsets() + last := loadRegOffset + x.regWidth(t) + if offs[loadRegOffset] != 0 { + // Document the problem before panicking.
+ for i := 0; i < len(rts); i++ { + rt := rts[i] + off := offs[i] + fmt.Printf("rt=%s, off=%d, rt.Width=%d, rt.Align=%d\n", rt.String(), off, rt.Width, rt.Align) + } + panic(fmt.Errorf("offset %d of requested register %d should be zero, source=%s", offs[loadRegOffset], loadRegOffset, source.LongString())) + } + + if x.debug { + x.Printf("decompose arg %s has %d locs\n", source.LongString(), len(locs)) + } + + for i := loadRegOffset; i < last; i++ { + rt := rts[i] + off := offs[i] + w := x.commonArgs[selKey{source, off, rt.Width, rt}] + if w == nil { + w = x.newArgToMemOrRegs(source, w, off, i, rt, pos) + suffix := x.pathTo(source.Type, rt, off) + if suffix != "" { + x.splitSlotsIntoNames(locs, suffix, off, rt, w) + } + } + if t.IsPtrShaped() { + // Preserve the original store type. This ensures pointer type + // properties aren't discarded (e.g, notinheap). + if rt.Width != t.Width || len(pa.Registers) != 1 || i != loadRegOffset { + b.Func.Fatalf("incompatible store type %v and %v, i=%d", t, rt, i) + } + rt = t + } + mem = x.storeArgOrLoad(pos, b, w, mem, rt, storeOffset+off, i, storeRc.next(rt)) + } + return mem + } + u := source.Type switch u.Kind() { case types.TARRAY: elem := u.Elem() + elemRO := x.regWidth(elem) for i := int64(0); i < u.NumElem(); i++ { elemOff := i * elem.Size() - mem = decomposeOne(x, pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff) + mem = storeOneArg(x, pos, b, locs, indexNames[i], source, mem, elem, elemOff, storeOffset+elemOff, loadRegOffset, storeRc.next(elem)) + loadRegOffset += elemRO pos = pos.WithNotStmt() } return mem case types.TSTRUCT: for i := 0; i < u.NumFields(); i++ { fld := u.Field(i) - mem = decomposeOne(x, pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset) + mem = storeOneArg(x, pos, b, locs, "."+fld.Sym.Name, source, mem, fld.Type, fld.Offset, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type)) + loadRegOffset += x.regWidth(fld.Type) pos = pos.WithNotStmt() } return mem @@ -386,100 +734,186 @@ func (x *expandState) decomposeArgOrLoad(pos src.XPos, b *Block, base, source, m break } tHi, tLo := x.intPairTypes(t.Kind()) - mem = decomposeOne(x, pos, b, base, source, mem, tHi, source.AuxInt+x.hiOffset, offset+x.hiOffset) + mem = storeOneArg(x, pos, b, locs, ".hi", source, mem, tHi, x.hiOffset, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo)) pos = pos.WithNotStmt() - return decomposeOne(x, pos, b, base, source, mem, tLo, source.AuxInt+x.lowOffset, offset+x.lowOffset) + return storeOneArg(x, pos, b, locs, ".lo", source, mem, tLo, x.lowOffset, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo)) case types.TINTER: - return decomposeTwo(x, pos, b, base, source, mem, x.typs.Uintptr, x.typs.BytePtr, source.AuxInt, offset) + sfx := ".itab" + if u.IsEmptyInterface() { + sfx = ".type" + } + return storeTwoArg(x, pos, b, locs, sfx, ".idata", source, mem, x.typs.Uintptr, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc) case types.TSTRING: - return decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset) + return storeTwoArg(x, pos, b, locs, ".ptr", ".len", source, mem, x.typs.BytePtr, x.typs.Int, 0, storeOffset, loadRegOffset, storeRc) case types.TCOMPLEX64: - return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float32, x.typs.Float32, source.AuxInt, offset) + return storeTwoArg(x, pos, b, locs, ".real", ".imag", source, mem, x.typs.Float32, x.typs.Float32, 0, storeOffset, loadRegOffset, storeRc) case 
types.TCOMPLEX128: - return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float64, x.typs.Float64, source.AuxInt, offset) + return storeTwoArg(x, pos, b, locs, ".real", ".imag", source, mem, x.typs.Float64, x.typs.Float64, 0, storeOffset, loadRegOffset, storeRc) case types.TSLICE: - mem = decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset) - return decomposeOne(x, pos, b, base, source, mem, x.typs.Int, source.AuxInt+2*x.ptrSize, offset+2*x.ptrSize) + mem = storeOneArg(x, pos, b, locs, ".ptr", source, mem, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr)) + return storeTwoArg(x, pos, b, locs, ".len", ".cap", source, mem, x.typs.Int, x.typs.Int, x.ptrSize, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc) + } + return nil +} + +func (x *expandState) splitSlotsIntoNames(locs []*LocalSlot, suffix string, off int64, rt *types.Type, w *Value) { + wlocs := x.splitSlots(locs, suffix, off, rt) + for _, l := range wlocs { + old, ok := x.f.NamedValues[*l] + x.f.NamedValues[*l] = append(old, w) + if !ok { + x.f.Names = append(x.f.Names, l) + } + } +} + +// decomposeLoad is a helper for storeArgOrLoad. +// It decomposes a Load into smaller parts and returns the new mem. +// If the type does not match one of the expected aggregate types, it returns nil instead. +// Parameters: +// pos -- the location of any generated code. +// b -- the block into which any generated code should normally be placed +// source -- the value, possibly an aggregate, to be stored. +// mem -- the mem flowing into this decomposition (loads depend on it, stores update it) +// t -- the type of the value to be stored +// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset +// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg. +// storeRc -- storeRC; if the value is stored in registers, this specifies the registers. +// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation. +// +// TODO -- this needs cleanup; it just works for SSA-able aggregates, and won't fully generalize to register-args aggregates.
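// (Editor's note: decomposeLoad mirrors decomposeArg's by-type cases but never consults a register assignment for its source -- the source already lives in memory -- so each component is re-loaded at its own offset via storeOneLoad/storeTwoLoad and handed back to storeArgOrLoad, whose storeRc decides whether the destination is memory or registers.)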
+func (x *expandState) decomposeLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value { + u := source.Type + switch u.Kind() { + case types.TARRAY: + elem := u.Elem() + elemRO := x.regWidth(elem) + for i := int64(0); i < u.NumElem(); i++ { + elemOff := i * elem.Size() + mem = storeOneLoad(x, pos, b, source, mem, elem, elemOff, storeOffset+elemOff, loadRegOffset, storeRc.next(elem)) + loadRegOffset += elemRO + pos = pos.WithNotStmt() + } + return mem + case types.TSTRUCT: + for i := 0; i < u.NumFields(); i++ { + fld := u.Field(i) + mem = storeOneLoad(x, pos, b, source, mem, fld.Type, fld.Offset, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type)) + loadRegOffset += x.regWidth(fld.Type) + pos = pos.WithNotStmt() + } + return mem + case types.TINT64, types.TUINT64: + if t.Width == x.regSize { + break + } + tHi, tLo := x.intPairTypes(t.Kind()) + mem = storeOneLoad(x, pos, b, source, mem, tHi, x.hiOffset, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo)) + pos = pos.WithNotStmt() + return storeOneLoad(x, pos, b, source, mem, tLo, x.lowOffset, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo)) + case types.TINTER: + return storeTwoLoad(x, pos, b, source, mem, x.typs.Uintptr, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc) + case types.TSTRING: + return storeTwoLoad(x, pos, b, source, mem, x.typs.BytePtr, x.typs.Int, 0, storeOffset, loadRegOffset, storeRc) + case types.TCOMPLEX64: + return storeTwoLoad(x, pos, b, source, mem, x.typs.Float32, x.typs.Float32, 0, storeOffset, loadRegOffset, storeRc) + case types.TCOMPLEX128: + return storeTwoLoad(x, pos, b, source, mem, x.typs.Float64, x.typs.Float64, 0, storeOffset, loadRegOffset, storeRc) + case types.TSLICE: + mem = storeOneLoad(x, pos, b, source, mem, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr)) + return storeTwoLoad(x, pos, b, source, mem, x.typs.Int, x.typs.Int, x.ptrSize, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc) } return nil } // storeOneArg creates a decomposed (one step) arg that is then stored. -// pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input, +// pos and b locate the store instruction, source is the "base" of the value input, // mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases. 
-func storeOneArg(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value { - w := x.common[selKey{source, offArg, t.Width, t}] - if w == nil { - w = source.Block.NewValue0IA(source.Pos, OpArg, t, offArg, source.Aux) - x.common[selKey{source, offArg, t.Width, t}] = w +func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix string, source, mem *Value, t *types.Type, argOffset, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value { + if x.debug { + x.indent(3) + defer x.indent(-3) + x.Printf("storeOneArg(%s; %s; %s; aO=%d; sO=%d; lrO=%d; %s)\n", source.LongString(), mem.String(), t.String(), argOffset, storeOffset, loadRegOffset, storeRc.String()) } - return x.storeArgOrLoad(pos, b, base, w, mem, t, offStore) + + w := x.commonArgs[selKey{source, argOffset, t.Width, t}] + if w == nil { + w = x.newArgToMemOrRegs(source, w, argOffset, loadRegOffset, t, pos) + x.splitSlotsIntoNames(locs, suffix, argOffset, t, w) + } + return x.storeArgOrLoad(pos, b, w, mem, t, storeOffset, loadRegOffset, storeRc) } // storeOneLoad creates a decomposed (one step) load that is then stored. -func storeOneLoad(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value { - from := x.offsetFrom(source.Args[0], offArg, types.NewPtr(t)) +func storeOneLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value { + from := x.offsetFrom(b, source.Args[0], offArg, types.NewPtr(t)) w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem) - return x.storeArgOrLoad(pos, b, base, w, mem, t, offStore) + return x.storeArgOrLoad(pos, b, w, mem, t, offStore, loadRegOffset, storeRc) } -func storeTwoArg(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value { - mem = storeOneArg(x, pos, b, base, source, mem, t1, offArg, offStore) +func storeTwoArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix1 string, suffix2 string, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value { + mem = storeOneArg(x, pos, b, locs, suffix1, source, mem, t1, offArg, offStore, loadRegOffset, storeRc.next(t1)) pos = pos.WithNotStmt() t1Size := t1.Size() - return storeOneArg(x, pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size) + return storeOneArg(x, pos, b, locs, suffix2, source, mem, t2, offArg+t1Size, offStore+t1Size, loadRegOffset+1, storeRc) } -func storeTwoLoad(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value { - mem = storeOneLoad(x, pos, b, base, source, mem, t1, offArg, offStore) +// storeTwoLoad creates a pair of decomposed (one step) loads that are then stored. +// the elements of the pair must not require any additional alignment. 
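// (Editor's example of the alignment caveat: storeTwoLoad places t2 at offArg+t1.Size(), which is only valid when no padding separates the pair -- e.g. a string's (ptr, len) or a slice's (len, cap), both pointer-width pairs; a hypothetical (int32, int64) pair would need 4 bytes of padding and could not use this helper.)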
+func storeTwoLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value { + mem = storeOneLoad(x, pos, b, source, mem, t1, offArg, offStore, loadRegOffset, storeRc.next(t1)) pos = pos.WithNotStmt() t1Size := t1.Size() - return storeOneLoad(x, pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size) + return storeOneLoad(x, pos, b, source, mem, t2, offArg+t1Size, offStore+t1Size, loadRegOffset+1, storeRc) } -// storeArgOrLoad converts stores of SSA-able aggregate arguments (passed to a call) into a series of primitive-typed +// storeArgOrLoad converts stores of SSA-able potentially aggregatable arguments (passed to a call) into a series of primitive-typed // stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg. // If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering. -func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value { +func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value { if x.debug { - fmt.Printf("\tstoreArgOrLoad(%s; %s; %s; %s; %d)\n", base.LongString(), source.LongString(), mem.String(), t.String(), offset) + x.indent(3) + defer x.indent(-3) + x.Printf("storeArgOrLoad(%s; %s; %s; %d; %s)\n", source.LongString(), mem.String(), t.String(), storeOffset, storeRc.String()) } + // Start with Opcodes that can be disassembled switch source.Op { case OpCopy: - return x.storeArgOrLoad(pos, b, base, source.Args[0], mem, t, offset) + return x.storeArgOrLoad(pos, b, source.Args[0], mem, t, storeOffset, loadRegOffset, storeRc) - case OpLoad: - ret := x.decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneLoad, storeTwoLoad) + case OpLoad, OpDereference: + ret := x.decomposeLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc) if ret != nil { return ret } case OpArg: - ret := x.decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneArg, storeTwoArg) + ret := x.decomposeArg(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc) if ret != nil { return ret } case OpArrayMake0, OpStructMake0: + // TODO(register args) is this correct for registers? 
return mem case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4: for i := 0; i < t.NumFields(); i++ { fld := t.Field(i) - mem = x.storeArgOrLoad(pos, b, base, source.Args[i], mem, fld.Type, offset+fld.Offset) + mem = x.storeArgOrLoad(pos, b, source.Args[i], mem, fld.Type, storeOffset+fld.Offset, 0, storeRc.next(fld.Type)) pos = pos.WithNotStmt() } return mem case OpArrayMake1: - return x.storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset) + return x.storeArgOrLoad(pos, b, source.Args[0], mem, t.Elem(), storeOffset, 0, storeRc.at(t, 0)) case OpInt64Make: tHi, tLo := x.intPairTypes(t.Kind()) - mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+x.hiOffset) + mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, tHi, storeOffset+x.hiOffset, 0, storeRc.next(tHi)) pos = pos.WithNotStmt() - return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+x.lowOffset) + return x.storeArgOrLoad(pos, b, source.Args[1], mem, tLo, storeOffset+x.lowOffset, 0, storeRc) case OpComplexMake: tPart := x.typs.Float32 @@ -487,25 +921,25 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, base, source, mem * if wPart == 8 { tPart = x.typs.Float64 } - mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, tPart, offset) + mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, tPart, storeOffset, 0, storeRc.next(tPart)) pos = pos.WithNotStmt() - return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, tPart, offset+wPart) + return x.storeArgOrLoad(pos, b, source.Args[1], mem, tPart, storeOffset+wPart, 0, storeRc) case OpIMake: - mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.Uintptr, offset) + mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.Uintptr, storeOffset, 0, storeRc.next(x.typs.Uintptr)) pos = pos.WithNotStmt() - return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.BytePtr, offset+x.ptrSize) + return x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.BytePtr, storeOffset+x.ptrSize, 0, storeRc) case OpStringMake: - mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.BytePtr, offset) + mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.BytePtr, storeOffset, 0, storeRc.next(x.typs.BytePtr)) pos = pos.WithNotStmt() - return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.Int, offset+x.ptrSize) + return x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.Int, storeOffset+x.ptrSize, 0, storeRc) case OpSliceMake: - mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.BytePtr, offset) + mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.BytePtr, storeOffset, 0, storeRc.next(x.typs.BytePtr)) pos = pos.WithNotStmt() - mem = x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.Int, offset+x.ptrSize) - return x.storeArgOrLoad(pos, b, base, source.Args[2], mem, x.typs.Int, offset+2*x.ptrSize) + mem = x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.Int, storeOffset+x.ptrSize, 0, storeRc.next(x.typs.Int)) + return x.storeArgOrLoad(pos, b, source.Args[2], mem, x.typs.Int, storeOffset+2*x.ptrSize, 0, storeRc) } // For nodes that cannot be taken apart -- OpSelectN, other structure selectors. 
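To make the Make-op recursion above concrete, here is a hedged, self-contained sketch (toy types; the real code threads SSA mem values rather than appending to a log) of flattening a nested aggregate into primitive stores at computed offsets:

package main

import "fmt"

// Toy mirror of storeArgOrLoad's recursion: an aggregate is either a leaf of
// a given size or a list of (offset, component) pairs; stores are "threaded"
// by appending to a log in order, much like the mem chain in the SSA form.
type agg struct {
	leafSize int64
	parts    []part
}

type part struct {
	off int64
	a   agg
}

func store(a agg, base int64, log *[]string) {
	if a.parts == nil {
		*log = append(*log, fmt.Sprintf("store %d bytes at +%d", a.leafSize, base))
		return
	}
	for _, p := range a.parts {
		store(p.a, base+p.off, log)
	}
}

func main() {
	word := agg{leafSize: 8}
	str := agg{parts: []part{{0, word}, {8, word}}}  // string = (ptr, len)
	pair := agg{parts: []part{{0, str}, {16, word}}} // struct { s string; n int }
	var log []string
	store(pair, 24, &log) // as if stored at SP+24
	for _, l := range log {
		fmt.Println(l) // stores land at +24, +32, +40
	}
}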
@@ -515,11 +949,13 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, base, source, mem * if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == x.regSize { t = removeTrivialWrapperTypes(t) // it could be a leaf type, but the "leaf" could be complex64 (for example) - return x.storeArgOrLoad(pos, b, base, source, mem, t, offset) + return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc) } + eltRO := x.regWidth(elt) for i := int64(0); i < t.NumElem(); i++ { sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, elt, offset+i*elt.Width) + mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Width, loadRegOffset, storeRc.at(t, 0)) + loadRegOffset += eltRO pos = pos.WithNotStmt() } return mem @@ -546,13 +982,14 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, base, source, mem * // of a *uint8, which does not succeed. t = removeTrivialWrapperTypes(t) // it could be a leaf type, but the "leaf" could be complex64 (for example) - return x.storeArgOrLoad(pos, b, base, source, mem, t, offset) + return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc) } for i := 0; i < t.NumFields(); i++ { fld := t.Field(i) sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, fld.Type, offset+fld.Offset) + mem = x.storeArgOrLoad(pos, b, sel, mem, fld.Type, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type)) + loadRegOffset += x.regWidth(fld.Type) pos = pos.WithNotStmt() } return mem @@ -563,91 +1000,128 @@ func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, base, source, mem * } tHi, tLo := x.intPairTypes(t.Kind()) sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+x.hiOffset) + mem = x.storeArgOrLoad(pos, b, sel, mem, tHi, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo)) pos = pos.WithNotStmt() sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source) - return x.storeArgOrLoad(pos, b, base, sel, mem, tLo, offset+x.lowOffset) + return x.storeArgOrLoad(pos, b, sel, mem, tLo, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.hiRo)) case types.TINTER: sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset) + mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr)) pos = pos.WithNotStmt() sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source) - return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset+x.ptrSize) + return x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset+x.ptrSize, loadRegOffset+RO_iface_data, storeRc) case types.TSTRING: sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset) + mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr)) pos = pos.WithNotStmt() sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source) - return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+x.ptrSize) + return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_string_len, storeRc) case types.TSLICE: et := types.NewPtr(t.Elem()) sel := source.Block.NewValue1(pos, OpSlicePtr, et, 
source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, et, offset) + mem = x.storeArgOrLoad(pos, b, sel, mem, et, storeOffset, loadRegOffset, storeRc.next(et)) pos = pos.WithNotStmt() sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+x.ptrSize) + mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc.next(x.typs.Int)) sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source) - return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+2*x.ptrSize) + return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+2*x.ptrSize, loadRegOffset+RO_slice_cap, storeRc) case types.TCOMPLEX64: sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float32, offset) + mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset, loadRegOffset, storeRc.next(x.typs.Float32)) pos = pos.WithNotStmt() sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source) - return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float32, offset+4) + return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset+4, loadRegOffset+RO_complex_imag, storeRc) case types.TCOMPLEX128: sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source) - mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float64, offset) + mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset, loadRegOffset, storeRc.next(x.typs.Float64)) pos = pos.WithNotStmt() sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source) - return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float64, offset+8) + return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset+8, loadRegOffset+RO_complex_imag, storeRc) } - dst := x.offsetFrom(base, offset, types.NewPtr(t)) - s := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem) + s := mem + if source.Op == OpDereference { + source.Op = OpLoad // For purposes of parameter passing expansion, a Dereference is a Load. + } + if storeRc.hasRegs() { + storeRc.addArg(source) + } else { + dst := x.offsetFrom(b, storeRc.storeDest, storeOffset, types.NewPtr(t)) + s = b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem) + } if x.debug { - fmt.Printf("\t\tstoreArg returns %s\n", s.LongString()) + x.Printf("-->storeArg returns %s, storeRc=%s\n", s.LongString(), storeRc.String()) } return s } -// rewriteArgs removes all the Args from a call and converts the call args into appropriate -// stores (or later, register movement). Extra args for interface and closure calls are ignored, -// but removed. -func (x *expandState) rewriteArgs(v *Value, firstArg int) *Value { +// rewriteArgs replaces all the call-parameter Args to a call with their register translation (if any). +// Preceding parameters (code pointers, closure pointer) are preserved, and the memory input is modified +// to account for any parameter stores required. +// Any of the old Args that have their use count fall to zero are marked OpInvalid. +func (x *expandState) rewriteArgs(v *Value, firstArg int) { + if x.debug { + x.indent(3) + defer x.indent(-3) + x.Printf("rewriteArgs(%s; %d)\n", v.LongString(), firstArg) + } // Thread the stores on the memory arg aux := v.Aux.(*AuxCall) pos := v.Pos.WithNotStmt() m0 := v.MemoryArg() mem := m0 - for i, a := range v.Args { - if i < firstArg { - continue - } - if a == m0 { // mem is last. 
- break - } - auxI := int64(i - firstArg) - if a.Op == OpDereference { + newArgs := []*Value{} + oldArgs := []*Value{} + for i, a := range v.Args[firstArg : len(v.Args)-1] { // skip leading non-parameter SSA Args and trailing mem SSA Arg. + oldArgs = append(oldArgs, a) + auxI := int64(i) + aRegs := aux.RegsOfArg(auxI) + aType := aux.TypeOfArg(auxI) + if len(aRegs) == 0 && a.Op == OpDereference { + aOffset := aux.OffsetOfArg(auxI) if a.MemoryArg() != m0 { x.f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString()) } // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move - // TODO this will be more complicated with registers in the picture. - mem = x.rewriteDereference(v.Block, x.sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos) + // TODO(register args) this will be more complicated with registers in the picture. + mem = x.rewriteDereference(v.Block, x.sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, pos) } else { - if x.debug { - fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI)) + var rc registerCursor + var result *[]*Value + var aOffset int64 + if len(aRegs) > 0 { + result = &newArgs + } else { + aOffset = aux.OffsetOfArg(auxI) } - mem = x.storeArgOrLoad(pos, v.Block, x.sp, a, mem, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI)) + if x.debug { + x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset) + } + rc.init(aRegs, aux.abiInfo, result, x.sp) + mem = x.storeArgOrLoad(pos, v.Block, a, mem, aType, aOffset, 0, rc) } } + var preArgStore [2]*Value + preArgs := append(preArgStore[:0], v.Args[0:firstArg]...) v.resetArgs() - return mem + v.AddArgs(preArgs...) + v.AddArgs(newArgs...) + v.AddArg(mem) + for _, a := range oldArgs { + if a.Uses == 0 { + if x.debug { + x.Printf("...marking %v unused\n", a.LongString()) + } + a.invalidateRecursively() + } + } + + return } // expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form @@ -666,70 +1140,88 @@ func expandCalls(f *Func) { // memory output as their input. sp, _ := f.spSb() x := &expandState{ - f: f, - debug: f.pass.debug > 0, - canSSAType: f.fe.CanSSA, - regSize: f.Config.RegSize, - sp: sp, - typs: &f.Config.Types, - ptrSize: f.Config.PtrSize, - namedSelects: make(map[*Value][]namedVal), - sdom: f.Sdom(), - common: make(map[selKey]*Value), - offsets: make(map[offsetKey]*Value), + f: f, + abi1: f.ABI1, + debug: f.pass.debug > 0, + canSSAType: f.fe.CanSSA, + regSize: f.Config.RegSize, + sp: sp, + typs: &f.Config.Types, + ptrSize: f.Config.PtrSize, + namedSelects: make(map[*Value][]namedVal), + sdom: f.Sdom(), + commonArgs: make(map[selKey]*Value), + memForCall: make(map[ID]*Value), + transformedSelects: make(map[ID]bool), } // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness. 
 	if f.Config.BigEndian {
-		x.lowOffset = 4
+		x.lowOffset, x.hiOffset = 4, 0
+		x.loRo, x.hiRo = 1, 0
 	} else {
-		x.hiOffset = 4
+		x.lowOffset, x.hiOffset = 0, 4
+		x.loRo, x.hiRo = 0, 1
 	}
 	if x.debug {
-		fmt.Printf("\nexpandsCalls(%s)\n", f.Name)
+		x.Printf("\nexpandCalls(%s)\n", f.Name)
+	}
+
+	for i, name := range f.Names {
+		t := name.Type
+		if x.isAlreadyExpandedAggregateType(t) {
+			for j, v := range f.NamedValues[*name] {
+				if v.Op == OpSelectN || v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type) {
+					ns := x.namedSelects[v]
+					x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
+				}
+			}
+		}
 	}
 
 	// TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here.
 
-	// Step 0: rewrite the calls to convert incoming args to stores.
+	// Step 0: rewrite the calls to convert the arguments of calls into stores/register movement.
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
+			firstArg := 0
 			switch v.Op {
 			case OpStaticLECall:
-				mem := x.rewriteArgs(v, 0)
-				v.SetArgs1(mem)
-			case OpClosureLECall:
-				code := v.Args[0]
-				context := v.Args[1]
-				mem := x.rewriteArgs(v, 2)
-				v.SetArgs3(code, context, mem)
 			case OpInterLECall:
-				code := v.Args[0]
-				mem := x.rewriteArgs(v, 1)
-				v.SetArgs2(code, mem)
+				firstArg = 1
+			case OpClosureLECall:
+				firstArg = 2
+			default:
+				continue
 			}
+			x.rewriteArgs(v, firstArg)
 		}
 		if isBlockMultiValueExit(b) {
+			x.indent(3)
 			// Very similar to code in rewriteArgs, but results instead of args.
 			v := b.Controls[0]
 			m0 := v.MemoryArg()
 			mem := m0
 			aux := f.OwnAux
 			pos := v.Pos.WithNotStmt()
-			for j, a := range v.Args {
+			allResults := []*Value{}
+			if x.debug {
+				x.Printf("multiValueExit rewriting %s\n", v.LongString())
+			}
+			var oldArgs []*Value
+			for j, a := range v.Args[:len(v.Args)-1] {
+				oldArgs = append(oldArgs, a)
 				i := int64(j)
-				if a == m0 {
-					break
-				}
 				auxType := aux.TypeOfResult(i)
-				auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, x.sp, mem)
+				auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.NameOfResult(i), x.sp, mem)
 				auxOffset := int64(0)
 				auxSize := aux.SizeOfResult(i)
-				if a.Op == OpDereference {
+				aRegs := aux.RegsOfResult(int64(j))
+				if len(aRegs) == 0 && a.Op == OpDereference {
 					// Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen.
 					if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP &&
-						dAddr.Args[1] == dMem && dAddr.Aux == aux.results[i].Name {
+						dAddr.Args[1] == dMem && dAddr.Aux == aux.NameOfResult(i) {
 						if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux {
 							dMem.copyOf(dMem.MemoryArg()) // elide the VarDef
 						}
@@ -738,28 +1230,37 @@ func expandCalls(f *Func) {
 					mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos)
 				} else {
 					if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
-						addr := a.Args[0]
-						if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.results[i].Name {
+						addr := a.Args[0] // This is a self-move. // TODO(register args) do what here for registers?
+ if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) { continue } } - mem = x.storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset) + var rc registerCursor + var result *[]*Value + if len(aRegs) > 0 { + result = &allResults + } + rc.init(aRegs, aux.abiInfo, result, auxBase) + mem = x.storeArgOrLoad(v.Pos, b, a, mem, aux.TypeOfResult(i), auxOffset, 0, rc) } } - b.SetControl(mem) - v.reset(OpInvalid) // otherwise it can have a mem operand which will fail check(), even though it is dead. - } - } - - for i, name := range f.Names { - t := name.Type - if x.isAlreadyExpandedAggregateType(t) { - for j, v := range f.NamedValues[name] { - if v.Op == OpSelectN || v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type) { - ns := x.namedSelects[v] - x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j}) + v.resetArgs() + v.AddArgs(allResults...) + v.AddArg(mem) + v.Type = types.NewResults(append(abi.RegisterTypes(aux.abiInfo.OutParams()), types.TypeMem)) + b.SetControl(v) + for _, a := range oldArgs { + if a.Uses == 0 { + if x.debug { + x.Printf("...marking %v unused\n", a.LongString()) + } + a.invalidateRecursively() } } + if x.debug { + x.Printf("...multiValueExit new result %s\n", v.LongString()) + } + x.indent(-3) } } @@ -781,14 +1282,9 @@ func expandCalls(f *Func) { t = tSrc } } - if iAEATt { - if x.debug { - fmt.Printf("Splitting store %s\n", v.LongString()) - } - dst, mem := v.Args[0], v.Args[2] - mem = x.storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0) - v.copyOf(mem) - } + dst, mem := v.Args[0], v.Args[2] + mem = x.storeArgOrLoad(v.Pos, b, source, mem, t, 0, 0, registerCursor{storeDest: dst}) + v.copyOf(mem) } } } @@ -808,7 +1304,7 @@ func expandCalls(f *Func) { case OpStructSelect, OpArraySelect, OpIData, OpITab, OpStringPtr, OpStringLen, - OpSlicePtr, OpSliceLen, OpSliceCap, + OpSlicePtr, OpSliceLen, OpSliceCap, OpSlicePtrUnchecked, OpComplexReal, OpComplexImag, OpInt64Hi, OpInt64Lo: w := v.Args[0] @@ -816,7 +1312,7 @@ func expandCalls(f *Func) { case OpStructSelect, OpArraySelect, OpSelectN, OpArg: val2Preds[w] += 1 if x.debug { - fmt.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w]) + x.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w]) } } fallthrough @@ -825,7 +1321,7 @@ func expandCalls(f *Func) { if _, ok := val2Preds[v]; !ok { val2Preds[v] = 0 if x.debug { - fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v]) + x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v]) } } @@ -836,7 +1332,7 @@ func expandCalls(f *Func) { if _, ok := val2Preds[v]; !ok { val2Preds[v] = 0 if x.debug { - fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v]) + x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v]) } } @@ -846,7 +1342,7 @@ func expandCalls(f *Func) { which := v.AuxInt aux := call.Aux.(*AuxCall) pt := v.Type - off := x.offsetFrom(x.sp, aux.OffsetOfResult(which), pt) + off := x.offsetFrom(x.f.Entry, x.sp, aux.OffsetOfResult(which), pt) v.copyOf(off) } } @@ -902,7 +1398,7 @@ func expandCalls(f *Func) { } } - x.common = make(map[selKey]*Value) + x.commonSelectors = make(map[selKey]*Value) // Rewrite duplicate selectors as copies where possible. for i := len(allOrdered) - 1; i >= 0; i-- { v := allOrdered[i] @@ -932,7 +1428,7 @@ func expandCalls(f *Func) { case OpArraySelect: offset = size * v.AuxInt case OpSelectN: - offset = w.Aux.(*AuxCall).OffsetOfResult(v.AuxInt) + offset = v.AuxInt // offset is just a key, really. 
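The selector cases that continue below feed this offset into a dedup map: offsetOrIndex is only a map key, and a later duplicate may become a copy of an earlier value only when the earlier value's block dominates it, since values are visited in dominator order. A rough self-contained model of that policy (integer IDs stand in for *Value and for the sparse dominator tree):

	package main

	import "fmt"

	type selKey struct {
		fromID        int   // ID of the value being selected from
		size          int64
		offsetOrIndex int64 // an offset, or just a disambiguating key
		typ           string
	}

	func main() {
		common := map[selKey]int{}                         // key -> canonical value ID
		dominates := func(a, b int) bool { return a <= b } // stand-in for sdom.IsAncestorEq
		values := []struct {
			id int
			k  selKey
		}{
			{1, selKey{7, 8, 0, "int64"}},
			{3, selKey{7, 8, 0, "int64"}}, // same selection as value 1
		}
		for _, v := range values {
			if dupe, ok := common[v.k]; ok && dominates(dupe, v.id) {
				fmt.Printf("v%d becomes a copy of v%d\n", v.id, dupe)
				continue
			}
			common[v.k] = v.id // a miss: install, it may match future values
		}
	}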
 		case OpInt64Hi:
 			offset = x.hiOffset
 		case OpInt64Lo:
@@ -944,16 +1440,19 @@ func expandCalls(f *Func) {
 		case OpComplexImag:
 			offset = size
 		}
-		sk := selKey{from: w, size: size, offset: offset, typ: typ}
-		dupe := x.common[sk]
+		sk := selKey{from: w, size: size, offsetOrIndex: offset, typ: typ}
+		dupe := x.commonSelectors[sk]
 		if dupe == nil {
-			x.common[sk] = v
+			x.commonSelectors[sk] = v
 		} else if x.sdom.IsAncestorEq(dupe.Block, v.Block) {
+			if x.debug {
+				x.Printf("Duplicate, make %s copy of %s\n", v, dupe)
+			}
 			v.copyOf(dupe)
 		} else {
 			// Because values are processed in dominator order, the old common[s] will never dominate after a miss is seen.
 			// Installing the new value might match some future values.
-			x.common[sk] = v
+			x.commonSelectors[sk] = v
 		}
 	}
@@ -964,16 +1463,16 @@ func expandCalls(f *Func) {
 	for i, v := range allOrdered {
 		if x.debug {
 			b := v.Block
-			fmt.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
+			x.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
 		}
 		if v.Uses == 0 {
-			v.reset(OpInvalid)
+			v.invalidateRecursively()
 			continue
 		}
 		if v.Op == OpCopy {
 			continue
 		}
-		locs := x.rewriteSelect(v, v, 0)
+		locs := x.rewriteSelect(v, v, 0, 0)
 		// Install new names.
 		if v.Type.IsMemory() {
 			continue
@@ -981,37 +1480,93 @@ func expandCalls(f *Func) {
 		// Leaf types may have debug locations
 		if !x.isAlreadyExpandedAggregateType(v.Type) {
 			for _, l := range locs {
-				f.NamedValues[l] = append(f.NamedValues[l], v)
+				if _, ok := f.NamedValues[*l]; !ok {
+					f.Names = append(f.Names, l)
+				}
+				f.NamedValues[*l] = append(f.NamedValues[*l], v)
 			}
-			f.Names = append(f.Names, locs...)
 			continue
 		}
-		// Not-leaf types that had debug locations need to lose them.
 		if ns, ok := x.namedSelects[v]; ok {
+			// Not-leaf types that had debug locations need to lose them.
+
 			toDelete = append(toDelete, ns...)
 		}
 	}
 
 	deleteNamedVals(f, toDelete)
 
-	// Step 4: rewrite the calls themselves, correcting the type
+	// Step 4: rewrite the calls themselves, correcting the type.
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
 			switch v.Op {
+			case OpArg:
+				x.rewriteArgToMemOrRegs(v)
 			case OpStaticLECall:
 				v.Op = OpStaticCall
-				v.Type = types.TypeMem
+				rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+				v.Type = types.NewResults(append(rts, types.TypeMem))
 			case OpClosureLECall:
 				v.Op = OpClosureCall
-				v.Type = types.TypeMem
+				rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+				v.Type = types.NewResults(append(rts, types.TypeMem))
 			case OpInterLECall:
 				v.Op = OpInterCall
-				v.Type = types.TypeMem
+				rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+				v.Type = types.NewResults(append(rts, types.TypeMem))
 			}
 		}
 	}
 
-	// Step 5: elide any copies introduced.
+	// Step 5: dedup OpArgXXXReg values. Mostly it is already dedup'd by commonArgs,
+	// but there are cases where we get the same OpArgXXXReg value with different types.
+	// E.g. string is sometimes decomposed as { *int8, int }, sometimes as { unsafe.Pointer, uintptr }.
+	// (Can we avoid that?)
+	var IArg, FArg [32]*Value
+	for _, v := range f.Entry.Values {
+		switch v.Op {
+		case OpArgIntReg:
+			i := v.AuxInt
+			if w := IArg[i]; w != nil {
+				if w.Type.Width != v.Type.Width {
+					f.Fatalf("incompatible OpArgIntReg [%d]: %s and %s", i, v.LongString(), w.LongString())
+				}
+				if w.Type.IsUnsafePtr() && !v.Type.IsUnsafePtr() {
+					// Update unsafe.Pointer type if we know the actual pointer type.
+					w.Type = v.Type
+				}
+				// TODO: don't dedup pointer and scalar? Rewrite to OpConvert? Can it happen?
+ v.copyOf(w) + } else { + IArg[i] = v + } + case OpArgFloatReg: + i := v.AuxInt + if w := FArg[i]; w != nil { + if w.Type.Width != v.Type.Width { + f.Fatalf("incompatible OpArgFloatReg [%d]: %v and %v", i, v, w) + } + v.copyOf(w) + } else { + FArg[i] = v + } + } + } + + // Step 6: elide any copies introduced. + // Update named values. + for _, name := range f.Names { + values := f.NamedValues[*name] + for i, v := range values { + if v.Op == OpCopy { + a := v.Args[0] + for a.Op == OpCopy { + a = a.Args[0] + } + values[i] = a + } + } + } for _, b := range f.Blocks { for _, v := range b.Values { for i, a := range v.Args { @@ -1022,10 +1577,179 @@ func expandCalls(f *Func) { v.SetArg(i, aa) for a.Uses == 0 { b := a.Args[0] - a.reset(OpInvalid) + a.invalidateRecursively() a = b } } } } + + // Rewriting can attach lines to values that are unlikely to survive code generation, so move them to a use. + for _, b := range f.Blocks { + for _, v := range b.Values { + for _, a := range v.Args { + if a.Pos.IsStmt() != src.PosIsStmt { + continue + } + if a.Type.IsMemory() { + continue + } + if a.Pos.Line() != v.Pos.Line() { + continue + } + if !a.Pos.SameFile(v.Pos) { + continue + } + switch a.Op { + case OpArgIntReg, OpArgFloatReg, OpSelectN: + v.Pos = v.Pos.WithIsStmt() + a.Pos = a.Pos.WithDefaultStmt() + } + } + } + } +} + +// rewriteArgToMemOrRegs converts OpArg v in-place into the register version of v, +// if that is appropriate. +func (x *expandState) rewriteArgToMemOrRegs(v *Value) *Value { + if x.debug { + x.indent(3) + defer x.indent(-3) + x.Printf("rewriteArgToMemOrRegs(%s)\n", v.LongString()) + } + pa := x.prAssignForArg(v) + switch len(pa.Registers) { + case 0: + frameOff := v.Aux.(*ir.Name).FrameOffset() + if pa.Offset() != int32(frameOff+x.f.ABISelf.LocalsOffset()) { + panic(fmt.Errorf("Parameter assignment %d and OpArg.Aux frameOffset %d disagree, op=%s", + pa.Offset(), frameOff, v.LongString())) + } + case 1: + t := v.Type + key := selKey{v, 0, t.Width, t} + w := x.commonArgs[key] + if w != nil { + v.copyOf(w) + break + } + r := pa.Registers[0] + var i int64 + v.Op, i = ArgOpAndRegisterFor(r, x.f.ABISelf) + v.Aux = &AuxNameOffset{v.Aux.(*ir.Name), 0} + v.AuxInt = i + x.commonArgs[key] = v + + default: + panic(badVal("Saw unexpanded OpArg", v)) + } + if x.debug { + x.Printf("-->%s\n", v.LongString()) + } + return v +} + +// newArgToMemOrRegs either rewrites toReplace into an OpArg referencing memory or into an OpArgXXXReg to a register, +// or rewrites it into a copy of the appropriate OpArgXXX. The actual OpArgXXX is determined by combining baseArg (an OpArg) +// with offset, regOffset, and t to determine which portion of it to reference (either all or a part, in memory or in registers). 
+func (x *expandState) newArgToMemOrRegs(baseArg, toReplace *Value, offset int64, regOffset Abi1RO, t *types.Type, pos src.XPos) *Value { + if x.debug { + x.indent(3) + defer x.indent(-3) + x.Printf("newArgToMemOrRegs(base=%s; toReplace=%s; t=%s; memOff=%d; regOff=%d)\n", baseArg.String(), toReplace.LongString(), t.String(), offset, regOffset) + } + key := selKey{baseArg, offset, t.Width, t} + w := x.commonArgs[key] + if w != nil { + if toReplace != nil { + toReplace.copyOf(w) + } + return w + } + + pa := x.prAssignForArg(baseArg) + if len(pa.Registers) == 0 { // Arg is on stack + frameOff := baseArg.Aux.(*ir.Name).FrameOffset() + if pa.Offset() != int32(frameOff+x.f.ABISelf.LocalsOffset()) { + panic(fmt.Errorf("Parameter assignment %d and OpArg.Aux frameOffset %d disagree, op=%s", + pa.Offset(), frameOff, baseArg.LongString())) + } + aux := baseArg.Aux + auxInt := baseArg.AuxInt + offset + if toReplace != nil && toReplace.Block == baseArg.Block { + toReplace.reset(OpArg) + toReplace.Aux = aux + toReplace.AuxInt = auxInt + toReplace.Type = t + w = toReplace + } else { + w = baseArg.Block.NewValue0IA(pos, OpArg, t, auxInt, aux) + } + x.commonArgs[key] = w + if toReplace != nil { + toReplace.copyOf(w) + } + if x.debug { + x.Printf("-->%s\n", w.LongString()) + } + return w + } + // Arg is in registers + r := pa.Registers[regOffset] + op, auxInt := ArgOpAndRegisterFor(r, x.f.ABISelf) + if op == OpArgIntReg && t.IsFloat() || op == OpArgFloatReg && t.IsInteger() { + fmt.Printf("pa=%v\nx.f.OwnAux.abiInfo=%s\n", + pa.ToString(x.f.ABISelf, true), + x.f.OwnAux.abiInfo.String()) + panic(fmt.Errorf("Op/Type mismatch, op=%s, type=%s", op.String(), t.String())) + } + if baseArg.AuxInt != 0 { + base.Fatalf("BaseArg %s bound to registers has non-zero AuxInt", baseArg.LongString()) + } + aux := &AuxNameOffset{baseArg.Aux.(*ir.Name), offset} + if toReplace != nil && toReplace.Block == baseArg.Block { + toReplace.reset(op) + toReplace.Aux = aux + toReplace.AuxInt = auxInt + toReplace.Type = t + w = toReplace + } else { + w = baseArg.Block.NewValue0IA(pos, op, t, auxInt, aux) + } + // If we are creating an OpArgIntReg/OpArgFloatReg that + // corresponds to an in-param that fits entirely in a register, + // then enter it into the name/value table. The LocalSlot + // is somewhat fictitious, since there is no incoming live + // memory version of the parameter, but we need an entry in + // NamedValues in order for ssa debug tracking to include + // the value in the tracking analysis. + if len(pa.Registers) == 1 { + loc := LocalSlot{N: aux.Name, Type: t, Off: 0} + values, ok := x.f.NamedValues[loc] + if !ok { + ploc := x.f.localSlotAddr(loc) + x.f.Names = append(x.f.Names, ploc) + } + x.f.NamedValues[loc] = append(values, w) + } + x.commonArgs[key] = w + if toReplace != nil { + toReplace.copyOf(w) + } + if x.debug { + x.Printf("-->%s\n", w.LongString()) + } + return w + +} + +// argOpAndRegisterFor converts an abi register index into an ssa Op and corresponding +// arg register index. 
+func ArgOpAndRegisterFor(r abi.RegIndex, abiConfig *abi.ABIConfig) (Op, int64) { + i := abiConfig.FloatIndexFor(r) + if i >= 0 { // float PR + return OpArgFloatReg, i + } + return OpArgIntReg, int64(r) } diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index 32e6d09d1bc..8ed8a0c4a6e 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -73,36 +73,6 @@ func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Name { n.Class = ir.PAUTO return n } -func (d TestFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) { - return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8} -} -func (d TestFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) { - return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off + 8} -} -func (d TestFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) { - return LocalSlot{N: s.N, Type: s.Type.Elem().PtrTo(), Off: s.Off}, - LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8}, - LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 16} -} -func (d TestFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) { - if s.Type.Size() == 16 { - return LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off + 8} - } - return LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off + 4} -} -func (d TestFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) { - if s.Type.IsSigned() { - return LocalSlot{N: s.N, Type: testTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off} - } - return LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off} -} -func (d TestFrontend) SplitStruct(s LocalSlot, i int) LocalSlot { - return LocalSlot{N: s.N, Type: s.Type.FieldType(i), Off: s.Off + s.Type.FieldOff(i)} -} -func (d TestFrontend) SplitArray(s LocalSlot) LocalSlot { - return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off} -} - func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot { return LocalSlot{N: parent.N, Type: t, Off: offset} } diff --git a/src/cmd/compile/internal/ssa/flags_test.go b/src/cmd/compile/internal/ssa/flags_test.go index d64abf652cd..0bc10971994 100644 --- a/src/cmd/compile/internal/ssa/flags_test.go +++ b/src/cmd/compile/internal/ssa/flags_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 || arm64 // +build amd64 arm64 package ssa diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index de99a8d4af9..fac876c23eb 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -5,6 +5,8 @@ package ssa import ( + "cmd/compile/internal/abi" + "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "crypto/sha1" @@ -43,6 +45,10 @@ type Func struct { DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases PrintOrHtmlSSA bool // true if GOSSAFUNC matches, true even if fe.Log() (spew phase results to stdout) is false. 
ruleMatches map[string]int // number of times countRule was called during compilation for any given string + ABI0 *abi.ABIConfig // A copy, for no-sync access + ABI1 *abi.ABIConfig // A copy, for no-sync access + ABISelf *abi.ABIConfig // ABI for function being compiled + ABIDefault *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions. scheduled bool // Values in Blocks are in final order laidout bool // Blocks are ordered @@ -56,10 +62,14 @@ type Func struct { NamedValues map[LocalSlot][]*Value // Names is a copy of NamedValues.Keys. We keep a separate list // of keys to make iteration order deterministic. - Names []LocalSlot + Names []*LocalSlot + // Canonicalize root/top-level local slots, and canonicalize their pieces. + // Because LocalSlot pieces refer to their parents with a pointer, this ensures that equivalent slots really are equal. + CanonicalLocalSlots map[LocalSlot]*LocalSlot + CanonicalLocalSplits map[LocalSlotSplitKey]*LocalSlot // RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry. - RegArgs []ArgPair + RegArgs []Spill // AuxCall describing parameters and results for this function. OwnAux *AuxCall @@ -82,10 +92,16 @@ type Func struct { constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type } +type LocalSlotSplitKey struct { + parent *LocalSlot + Off int64 // offset of slot in N + Type *types.Type // type of slot +} + // NewFunc returns a new, empty function object. // Caller must set f.Config and f.Cache before using f. func NewFunc(fe Frontend) *Func { - return &Func{fe: fe, NamedValues: make(map[LocalSlot][]*Value)} + return &Func{fe: fe, NamedValues: make(map[LocalSlot][]*Value), CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot), CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot)} } // NumBlocks returns an integer larger than the id of any Block in the Func. @@ -188,6 +204,101 @@ func (f *Func) retDeadcodeLiveOrderStmts(liveOrderStmts []*Value) { f.Cache.deadcode.liveOrderStmts = liveOrderStmts } +func (f *Func) localSlotAddr(slot LocalSlot) *LocalSlot { + a, ok := f.CanonicalLocalSlots[slot] + if !ok { + a = new(LocalSlot) + *a = slot // don't escape slot + f.CanonicalLocalSlots[slot] = a + } + return a +} + +func (f *Func) SplitString(name *LocalSlot) (*LocalSlot, *LocalSlot) { + ptrType := types.NewPtr(types.Types[types.TUINT8]) + lenType := types.Types[types.TINT] + // Split this string up into two separate variables. + p := f.SplitSlot(name, ".ptr", 0, ptrType) + l := f.SplitSlot(name, ".len", ptrType.Size(), lenType) + return p, l +} + +func (f *Func) SplitInterface(name *LocalSlot) (*LocalSlot, *LocalSlot) { + n := name.N + u := types.Types[types.TUINTPTR] + t := types.NewPtr(types.Types[types.TUINT8]) + // Split this interface up into two separate variables. 
+ sfx := ".itab" + if n.Type().IsEmptyInterface() { + sfx = ".type" + } + c := f.SplitSlot(name, sfx, 0, u) // see comment in typebits.Set + d := f.SplitSlot(name, ".data", u.Size(), t) + return c, d +} + +func (f *Func) SplitSlice(name *LocalSlot) (*LocalSlot, *LocalSlot, *LocalSlot) { + ptrType := types.NewPtr(name.Type.Elem()) + lenType := types.Types[types.TINT] + p := f.SplitSlot(name, ".ptr", 0, ptrType) + l := f.SplitSlot(name, ".len", ptrType.Size(), lenType) + c := f.SplitSlot(name, ".cap", ptrType.Size()+lenType.Size(), lenType) + return p, l, c +} + +func (f *Func) SplitComplex(name *LocalSlot) (*LocalSlot, *LocalSlot) { + s := name.Type.Size() / 2 + var t *types.Type + if s == 8 { + t = types.Types[types.TFLOAT64] + } else { + t = types.Types[types.TFLOAT32] + } + r := f.SplitSlot(name, ".real", 0, t) + i := f.SplitSlot(name, ".imag", t.Size(), t) + return r, i +} + +func (f *Func) SplitInt64(name *LocalSlot) (*LocalSlot, *LocalSlot) { + var t *types.Type + if name.Type.IsSigned() { + t = types.Types[types.TINT32] + } else { + t = types.Types[types.TUINT32] + } + if f.Config.BigEndian { + return f.SplitSlot(name, ".hi", 0, t), f.SplitSlot(name, ".lo", t.Size(), types.Types[types.TUINT32]) + } + return f.SplitSlot(name, ".hi", t.Size(), t), f.SplitSlot(name, ".lo", 0, types.Types[types.TUINT32]) +} + +func (f *Func) SplitStruct(name *LocalSlot, i int) *LocalSlot { + st := name.Type + return f.SplitSlot(name, st.FieldName(i), st.FieldOff(i), st.FieldType(i)) +} +func (f *Func) SplitArray(name *LocalSlot) *LocalSlot { + n := name.N + at := name.Type + if at.NumElem() != 1 { + base.FatalfAt(n.Pos(), "bad array size") + } + et := at.Elem() + return f.SplitSlot(name, "[0]", 0, et) +} + +func (f *Func) SplitSlot(name *LocalSlot, sfx string, offset int64, t *types.Type) *LocalSlot { + lssk := LocalSlotSplitKey{name, offset, t} + if als, ok := f.CanonicalLocalSplits[lssk]; ok { + return als + } + // Note: the _ field may appear several times. But + // have no fear, identically-named but distinct Autos are + // ok, albeit maybe confusing for a debugger. + ls := f.fe.SplitSlot(name, sfx, offset, t) + f.CanonicalLocalSplits[lssk] = &ls + return &ls +} + // newValue allocates a new Value with the given fields and places it at the end of b.Values. func (f *Func) newValue(op Op, t *types.Type, b *Block, pos src.XPos) *Value { var v *Value @@ -546,7 +657,7 @@ func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, return v } -// NewValue4I returns a new value in the block with four arguments and and auxint value. +// NewValue4I returns a new value in the block with four arguments and auxint value. func (b *Block) NewValue4I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2, arg3 *Value) *Value { v := b.Func.newValue(op, t, b, pos) v.AuxInt = auxint @@ -648,7 +759,19 @@ func (f *Func) Frontend() Frontend { return f.f func (f *Func) Warnl(pos src.XPos, msg string, args ...interface{}) { f.fe.Warnl(pos, msg, args...) } func (f *Func) Logf(msg string, args ...interface{}) { f.fe.Logf(msg, args...) } func (f *Func) Log() bool { return f.fe.Log() } -func (f *Func) Fatalf(msg string, args ...interface{}) { f.fe.Fatalf(f.Entry.Pos, msg, args...) 
} + +func (f *Func) Fatalf(msg string, args ...interface{}) { + stats := "crashed" + if f.Log() { + f.Logf(" pass %s end %s\n", f.pass.name, stats) + printFunc(f) + } + if f.HTMLWriter != nil { + f.HTMLWriter.WritePhase(f.pass.name, fmt.Sprintf("%s %s", f.pass.name, stats)) + f.HTMLWriter.flushPhases() + } + f.fe.Fatalf(f.Entry.Pos, msg, args...) +} // postorder returns the reachable blocks in f in a postorder traversal. func (f *Func) postorder() []*Block { diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go index c51461cbfff..fec2ba87737 100644 --- a/src/cmd/compile/internal/ssa/fuse.go +++ b/src/cmd/compile/internal/ssa/fuse.go @@ -11,8 +11,8 @@ import ( // fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange). func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) } -// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf). -func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf) } +// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect). +func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) } type fuseType uint8 @@ -20,6 +20,7 @@ const ( fuseTypePlain fuseType = 1 << iota fuseTypeIf fuseTypeIntInRange + fuseTypeBranchRedirect fuseTypeShortCircuit ) @@ -43,6 +44,9 @@ func fuse(f *Func, typ fuseType) { changed = shortcircuitBlock(b) || changed } } + if typ&fuseTypeBranchRedirect != 0 { + changed = fuseBranchRedirect(f) || changed + } if changed { f.invalidateCFG() } @@ -51,11 +55,11 @@ func fuse(f *Func, typ fuseType) { // fuseBlockIf handles the following cases where s0 and s1 are empty blocks. // -// b b b b -// / \ | \ / | | | -// s0 s1 | s1 s0 | | | -// \ / | / \ | | | -// ss ss ss ss +// b b b b +// \ / \ / | \ / \ / | | | +// s0 s1 | s1 s0 | | | +// \ / | / \ | | | +// ss ss ss ss // // If all Phi ops in ss have identical variables for slots corresponding to // s0, s1 and b then the branch can be dropped. @@ -69,11 +73,11 @@ func fuseBlockIf(b *Block) bool { if b.Kind != BlockIf { return false } - + // It doesn't matter how much Preds does s0 or s1 have. var ss0, ss1 *Block s0 := b.Succs[0].b i0 := b.Succs[0].i - if s0.Kind != BlockPlain || len(s0.Preds) != 1 || !isEmpty(s0) { + if s0.Kind != BlockPlain || !isEmpty(s0) { s0, ss0 = b, s0 } else { ss0 = s0.Succs[0].b @@ -81,15 +85,25 @@ func fuseBlockIf(b *Block) bool { } s1 := b.Succs[1].b i1 := b.Succs[1].i - if s1.Kind != BlockPlain || len(s1.Preds) != 1 || !isEmpty(s1) { + if s1.Kind != BlockPlain || !isEmpty(s1) { s1, ss1 = b, s1 } else { ss1 = s1.Succs[0].b i1 = s1.Succs[0].i } - if ss0 != ss1 { - return false + if s0.Kind == BlockPlain && isEmpty(s0) && s1.Kind == BlockPlain && isEmpty(s1) { + // Two special cases where both s0, s1 and ss are empty blocks. + if s0 == ss1 { + s0, ss0 = b, ss1 + } else if ss0 == s1 { + s1, ss1 = b, ss0 + } else { + return false + } + } else { + return false + } } ss := ss0 @@ -102,48 +116,45 @@ func fuseBlockIf(b *Block) bool { } } - // Now we have two of following b->ss, b->s0->ss and b->s1->ss, - // with s0 and s1 empty if exist. - // We can replace it with b->ss without if all OpPhis in ss - // have identical predecessors (verified above). - // No critical edge is introduced because b will have one successor. - if s0 != b && s1 != b { - // Replace edge b->s0->ss with b->ss. - // We need to keep a slot for Phis corresponding to b. 
- b.Succs[0] = Edge{ss, i0} - ss.Preds[i0] = Edge{b, 0} - b.removeEdge(1) - s1.removeEdge(0) - } else if s0 != b { - b.removeEdge(0) + // We do not need to redirect the Preds of s0 and s1 to ss, + // the following optimization will do this. + b.removeEdge(0) + if s0 != b && len(s0.Preds) == 0 { s0.removeEdge(0) - } else if s1 != b { - b.removeEdge(1) - s1.removeEdge(0) - } else { - b.removeEdge(1) + // Move any (dead) values in s0 to b, + // where they will be eliminated by the next deadcode pass. + for _, v := range s0.Values { + v.Block = b + } + b.Values = append(b.Values, s0.Values...) + // Clear s0. + s0.Kind = BlockInvalid + s0.Values = nil + s0.Succs = nil + s0.Preds = nil } + b.Kind = BlockPlain b.Likely = BranchUnknown b.ResetControls() - - // Trash the empty blocks s0 and s1. - blocks := [...]*Block{s0, s1} - for _, s := range &blocks { - if s == b { - continue + // The values in b may be dead codes, and clearing them in time may + // obtain new optimization opportunities. + // First put dead values that can be deleted into a slice walkValues. + // Then put their arguments in walkValues before resetting the dead values + // in walkValues, because the arguments may also become dead values. + walkValues := []*Value{} + for _, v := range b.Values { + if v.Uses == 0 && v.removeable() { + walkValues = append(walkValues, v) } - // Move any (dead) values in s0 or s1 to b, - // where they will be eliminated by the next deadcode pass. - for _, v := range s.Values { - v.Block = b + } + for len(walkValues) != 0 { + v := walkValues[len(walkValues)-1] + walkValues = walkValues[:len(walkValues)-1] + if v.Uses == 0 && v.removeable() { + walkValues = append(walkValues, v.Args...) + v.reset(OpInvalid) } - b.Values = append(b.Values, s.Values...) - // Clear s. - s.Kind = BlockInvalid - s.Values = nil - s.Succs = nil - s.Preds = nil } return true } diff --git a/src/cmd/compile/internal/ssa/fuse_branchredirect.go b/src/cmd/compile/internal/ssa/fuse_branchredirect.go new file mode 100644 index 00000000000..1b8b307bcac --- /dev/null +++ b/src/cmd/compile/internal/ssa/fuse_branchredirect.go @@ -0,0 +1,110 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// fuseBranchRedirect checks for a CFG in which the outbound branch +// of an If block can be derived from its predecessor If block, in +// some such cases, we can redirect the predecessor If block to the +// corresponding successor block directly. For example: +// p: +// v11 = Less64 v10 v8 +// If v11 goto b else u +// b: <- p ... +// v17 = Leq64 v10 v8 +// If v17 goto s else o +// We can redirect p to s directly. +// +// The implementation here borrows the framework of the prove pass. +// 1, Traverse all blocks of function f to find If blocks. +// 2, For any If block b, traverse all its predecessors to find If blocks. +// 3, For any If block predecessor p, update relationship p->b. +// 4, Traverse all successors of b. +// 5, For any successor s of b, try to update relationship b->s, if a +// contradiction is found then redirect p to another successor of b. +func fuseBranchRedirect(f *Func) bool { + ft := newFactsTable(f) + ft.checkpoint() + + changed := false + for i := len(f.Blocks) - 1; i >= 0; i-- { + b := f.Blocks[i] + if b.Kind != BlockIf { + continue + } + // b is either empty or only contains the control value. 
+ // TODO: if b contains only OpCopy or OpNot related to b.Controls, + // such as Copy(Not(Copy(Less64(v1, v2)))), perhaps it can be optimized. + bCtl := b.Controls[0] + if bCtl.Block != b && len(b.Values) != 0 || (len(b.Values) != 1 || bCtl.Uses != 1) && bCtl.Block == b { + continue + } + + for k := 0; k < len(b.Preds); k++ { + pk := b.Preds[k] + p := pk.b + if p.Kind != BlockIf || p == b { + continue + } + pbranch := positive + if pk.i == 1 { + pbranch = negative + } + ft.checkpoint() + // Assume branch p->b is taken. + addBranchRestrictions(ft, p, pbranch) + // Check if any outgoing branch is unreachable based on the above condition. + parent := b + for j, bbranch := range [...]branch{positive, negative} { + ft.checkpoint() + // Try to update relationship b->child, and check if the contradiction occurs. + addBranchRestrictions(ft, parent, bbranch) + unsat := ft.unsat + ft.restore() + if !unsat { + continue + } + // This branch is impossible,so redirect p directly to another branch. + out := 1 ^ j + child := parent.Succs[out].b + if child == b { + continue + } + b.removePred(k) + p.Succs[pk.i] = Edge{child, len(child.Preds)} + // Fix up Phi value in b to have one less argument. + for _, v := range b.Values { + if v.Op != OpPhi { + continue + } + v.RemoveArg(k) + phielimValue(v) + } + // Fix up child to have one more predecessor. + child.Preds = append(child.Preds, Edge{p, pk.i}) + ai := b.Succs[out].i + for _, v := range child.Values { + if v.Op != OpPhi { + continue + } + v.AddArg(v.Args[ai]) + } + if b.Func.pass.debug > 0 { + b.Func.Warnl(b.Controls[0].Pos, "Redirect %s based on %s", b.Controls[0].Op, p.Controls[0].Op) + } + changed = true + k-- + break + } + ft.restore() + } + if len(b.Preds) == 0 && b != f.Entry { + // Block is now dead. + b.Kind = BlockInvalid + } + } + ft.restore() + ft.cleanup(f) + return changed +} diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go index 15190997f22..27a14b17819 100644 --- a/src/cmd/compile/internal/ssa/fuse_test.go +++ b/src/cmd/compile/internal/ssa/fuse_test.go @@ -104,6 +104,18 @@ func TestFuseHandlesPhis(t *testing.T) { func TestFuseEliminatesEmptyBlocks(t *testing.T) { c := testConfig(t) + // Case 1, plain type empty blocks z0 ~ z3 will be eliminated. + // entry + // | + // z0 + // | + // z1 + // | + // z2 + // | + // z3 + // | + // exit fun := c.Fun("entry", Bloc("entry", Valu("mem", OpInitMem, types.TypeMem, 0, nil), @@ -126,16 +138,77 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) { for k, b := range fun.blocks { if k[:1] == "z" && b.Kind != BlockInvalid { - t.Errorf("%s was not eliminated, but should have", k) + t.Errorf("case1 %s was not eliminated, but should have", k) + } + } + + // Case 2, empty blocks with If branch, z0 and z1 will be eliminated. + // entry + // / \ + // z0 z1 + // \ / + // exit + fun = c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("c", OpArg, c.config.Types.Bool, 0, nil), + If("c", "z0", "z1")), + Bloc("z0", + Goto("exit")), + Bloc("z1", + Goto("exit")), + Bloc("exit", + Exit("mem"), + )) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for k, b := range fun.blocks { + if k[:1] == "z" && b.Kind != BlockInvalid { + t.Errorf("case2 %s was not eliminated, but should have", k) + } + } + + // Case 3, empty blocks with multiple predecessors, z0 and z1 will be eliminated. 
+ // entry + // | \ + // | b0 + // | / \ + // z0 z1 + // \ / + // exit + fun = c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("c1", OpArg, c.config.Types.Bool, 0, nil), + If("c1", "b0", "z0")), + Bloc("b0", + Valu("c2", OpArg, c.config.Types.Bool, 0, nil), + If("c2", "z1", "z0")), + Bloc("z0", + Goto("exit")), + Bloc("z1", + Goto("exit")), + Bloc("exit", + Exit("mem"), + )) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for k, b := range fun.blocks { + if k[:1] == "z" && b.Kind != BlockInvalid { + t.Errorf("case3 %s was not eliminated, but should have", k) } } } func TestFuseSideEffects(t *testing.T) { - // Test that we don't fuse branches that have side effects but + c := testConfig(t) + // Case1, test that we don't fuse branches that have side effects but // have no use (e.g. followed by infinite loop). // See issue #36005. - c := testConfig(t) fun := c.Fun("entry", Bloc("entry", Valu("mem", OpInitMem, types.TypeMem, 0, nil), @@ -163,6 +236,31 @@ func TestFuseSideEffects(t *testing.T) { t.Errorf("else is eliminated, but should not") } } + + // Case2, z0 contains a value that has side effect, z0 shouldn't be eliminated. + // entry + // | \ + // | z0 + // | / + // exit + fun = c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("c1", OpArg, c.config.Types.Bool, 0, nil), + Valu("p", OpArg, c.config.Types.IntPtr, 0, nil), + If("c1", "z0", "exit")), + Bloc("z0", + Valu("nilcheck", OpNilCheck, types.TypeVoid, 0, nil, "p", "mem"), + Goto("exit")), + Bloc("exit", + Exit("mem"), + )) + CheckFunc(fun.f) + fuseLate(fun.f) + z0, ok := fun.blocks["z0"] + if !ok || z0.Kind == BlockInvalid { + t.Errorf("case2 z0 is eliminated, but should not") + } } func BenchmarkFuse(b *testing.B) { diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules index df03cb71a6e..199b73c42f4 100644 --- a/src/cmd/compile/internal/ssa/gen/386.rules +++ b/src/cmd/compile/internal/ssa/gen/386.rules @@ -54,6 +54,7 @@ (Bswap32 ...) => (BSWAPL ...) (Sqrt ...) => (SQRTSD ...) +(Sqrt32 ...) => (SQRTSS ...) (Ctz16 x) => (BSFL (ORLconst [0x10000] x)) (Ctz16NonZero ...) => (BSFL ...) @@ -257,17 +258,17 @@ (Zero [4] destptr mem) => (MOVLstoreconst [0] destptr mem) (Zero [3] destptr mem) => - (MOVBstoreconst [makeValAndOff32(0,2)] destptr - (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVBstoreconst [makeValAndOff(0,2)] destptr + (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [5] destptr mem) => - (MOVBstoreconst [makeValAndOff32(0,4)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVBstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [6] destptr mem) => - (MOVWstoreconst [makeValAndOff32(0,4)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVWstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [7] destptr mem) => - (MOVLstoreconst [makeValAndOff32(0,3)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVLstoreconst [makeValAndOff(0,3)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) // Strip off any fractional word zeroing. (Zero [s] destptr mem) && s%4 != 0 && s > 4 => @@ -276,17 +277,17 @@ // Zero small numbers of words directly. 
(Zero [8] destptr mem) => - (MOVLstoreconst [makeValAndOff32(0,4)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVLstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [12] destptr mem) => - (MOVLstoreconst [makeValAndOff32(0,8)] destptr - (MOVLstoreconst [makeValAndOff32(0,4)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))) + (MOVLstoreconst [makeValAndOff(0,8)] destptr + (MOVLstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))) (Zero [16] destptr mem) => - (MOVLstoreconst [makeValAndOff32(0,12)] destptr - (MOVLstoreconst [makeValAndOff32(0,8)] destptr - (MOVLstoreconst [makeValAndOff32(0,4)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)))) + (MOVLstoreconst [makeValAndOff(0,12)] destptr + (MOVLstoreconst [makeValAndOff(0,8)] destptr + (MOVLstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))) // Medium zeroing uses a duff device. (Zero [s] destptr mem) @@ -620,12 +621,12 @@ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {sym} base mem) // Fold constants into stores. -(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) => - (MOVLstoreconst [makeValAndOff32(c,off)] {sym} ptr mem) -(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) => - (MOVWstoreconst [makeValAndOff32(c,off)] {sym} ptr mem) -(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) => - (MOVBstoreconst [makeValAndOff32(c,off)] {sym} ptr mem) +(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) => + (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) => + (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) => + (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem) // Fold address offsets into constant stores. 
(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) => @@ -675,8 +676,8 @@ (MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) - && y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off)) => - ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff32(c,off)] {sym} ptr mem) + && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem) // fold LEALs together (LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => @@ -994,49 +995,49 @@ && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem) + => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem) + => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) (MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem)) && x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x) - => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem) + => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem) (MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem)) && x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x) - => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem) + => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem) (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) && x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) - => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem) + => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) && x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem) + => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) (MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem)) && x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x) - => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem) + => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem) (MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem)) && x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x) - => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem) + => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem) // Combine stores into larger (unaligned) stores. 
(MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -1098,13 +1099,12 @@ (CMP(L|W|B)const l:(MOV(L|W|B)load {sym} [off] ptr mem) [c]) && l.Uses == 1 - && validValAndOff(int64(c), int64(off)) && clobber(l) => - @l.Block (CMP(L|W|B)constload {sym} [makeValAndOff32(int32(c),int32(off))] ptr mem) + @l.Block (CMP(L|W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem) -(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem) -(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem) -(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem) +(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) +(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) +(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) (MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))]) (MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go index 737b99c3716..c4b49fbb230 100644 --- a/src/cmd/compile/internal/ssa/gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/gen/386Ops.go @@ -146,14 +146,14 @@ func init() { var _386ops = []opData{ // fp ops - {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true, usesScratch: true}, // fp32 add - {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add - {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true, usesScratch: true}, // fp32 sub - {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub - {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true, usesScratch: true}, // fp32 mul - {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul - {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true, usesScratch: true}, // fp32 div - {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div + {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add + {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add + {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true}, // fp32 sub + {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub + {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, // fp32 mul + {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul + {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div + {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load {name: "MOVSDload", 
argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load @@ -246,8 +246,8 @@ func init() { {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, - {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags", usesScratch: true}, // arg0 compare to arg1, f32 - {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags", usesScratch: true}, // arg0 compare to arg1, f64 + {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32 + {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64 {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 @@ -308,6 +308,7 @@ func init() { {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0) + {name: "SQRTSS", argLength: 1, reg: fp11, asm: "SQRTSS"}, // sqrt(arg0), float32 {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear. // Note: SBBW and SBBB are subsumed by SBBL @@ -341,12 +342,12 @@ func init() { {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint - {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL", usesScratch: true}, // convert float64 to int32 - {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL", usesScratch: true}, // convert float32 to int32 - {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS", usesScratch: true}, // convert int32 to float32 - {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD", usesScratch: true}, // convert int32 to float64 - {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS", usesScratch: true}, // convert float64 to float32 - {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64 + {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32 + {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32 + {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32 + {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64 + {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32 + {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64 {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation. 
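The new SQRTSS op on 386 (and the matching (Sqrt32 ...) lowering added below for AMD64 and the other ports) targets single-precision square root. A hedged sketch of the source pattern that benefits, assuming the usual intrinsic recognition of math.Sqrt:

```go
package main

import "math"

// With a dedicated Sqrt32 op this can compile to one SQRTSS instead of
// widening to float64, doing SQRTSD, and narrowing the result back.
func sqrt32(x float32) float32 {
	return float32(math.Sqrt(float64(x)))
}

func main() { println(sqrt32(2)) }
```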
diff --git a/src/cmd/compile/internal/ssa/gen/386splitload.rules b/src/cmd/compile/internal/ssa/gen/386splitload.rules index ed93b90b733..29d4f8c227f 100644 --- a/src/cmd/compile/internal/ssa/gen/386splitload.rules +++ b/src/cmd/compile/internal/ssa/gen/386splitload.rules @@ -6,6 +6,6 @@ (CMP(L|W|B)load {sym} [off] ptr x mem) => (CMP(L|W|B) (MOV(L|W|B)load {sym} [off] ptr mem) x) -(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()]) -(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()]) -(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()]) +(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()]) +(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()]) +(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()]) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 3c75bcfa05f..ec91ea1513b 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -104,6 +104,7 @@ (PopCount8 x) => (POPCNTL (MOVBQZX x)) (Sqrt ...) => (SQRTSD ...) +(Sqrt32 ...) => (SQRTSS ...) (RoundToEven x) => (ROUNDSD [0] x) (Floor x) => (ROUNDSD [1] x) @@ -317,46 +318,46 @@ // Lowering Zero instructions (Zero [0] _ mem) => mem -(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff32(0,0)] destptr mem) -(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem) -(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem) -(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem) +(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem) (Zero [3] destptr mem) => - (MOVBstoreconst [makeValAndOff32(0,2)] destptr - (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVBstoreconst [makeValAndOff(0,2)] destptr + (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [5] destptr mem) => - (MOVBstoreconst [makeValAndOff32(0,4)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVBstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [6] destptr mem) => - (MOVWstoreconst [makeValAndOff32(0,4)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVWstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [7] destptr mem) => - (MOVLstoreconst [makeValAndOff32(0,3)] destptr - (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVLstoreconst [makeValAndOff(0,3)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) // Strip off any fractional word zeroing. (Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE => (Zero [s-s%8] (OffPtr destptr [s%8]) - (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) // Zero small numbers of words directly. 
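The Zero lowerings in the hunk above, and the ones that follow this comment, pick constant-store widths for each small size. A sketch of source that exercises them (the type and function names are illustrative; exact codegen depends on config.useSSE):

```go
package main

// Zeroing a 16-byte value produces (Zero [16] ptr mem); with SSE that
// lowers to a single MOVOstorezero, while odd sizes such as 7 are
// covered by two overlapping MOVLstoreconst stores at offsets 3 and 0.
type small struct {
	a, b int32
	c    [8]byte
}

//go:noinline
func reset(p *small) {
	*p = small{}
}

func main() {
	s := small{a: 1}
	reset(&s)
	println(s.a) // 0
}
```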
(Zero [16] destptr mem) && !config.useSSE => - (MOVQstoreconst [makeValAndOff32(0,8)] destptr - (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [24] destptr mem) && !config.useSSE => - (MOVQstoreconst [makeValAndOff32(0,16)] destptr - (MOVQstoreconst [makeValAndOff32(0,8)] destptr - (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))) + (MOVQstoreconst [makeValAndOff(0,16)] destptr + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))) (Zero [32] destptr mem) && !config.useSSE => - (MOVQstoreconst [makeValAndOff32(0,24)] destptr - (MOVQstoreconst [makeValAndOff32(0,16)] destptr - (MOVQstoreconst [makeValAndOff32(0,8)] destptr - (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)))) + (MOVQstoreconst [makeValAndOff(0,24)] destptr + (MOVQstoreconst [makeValAndOff(0,16)] destptr + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))) (Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE => - (MOVQstoreconst [makeValAndOff32(0,int32(s-8))] destptr - (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) // Adjust zeros to be a multiple of 16 bytes. (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE => @@ -365,7 +366,7 @@ (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE => (Zero [s-s%16] (OffPtr destptr [s%16]) - (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)) + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) (Zero [16] destptr mem) && config.useSSE => (MOVOstorezero destptr mem) @@ -459,7 +460,7 @@ (IsInBounds idx len) => (SETB (CMPQ idx len)) (IsSliceInBounds idx len) => (SETBE (CMPQ idx len)) (NilCheck ...) => (LoweredNilCheck ...) -(GetG mem) && !base.Flag.ABIWrap => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register. +(GetG mem) && !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal) => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register. (GetClosurePtr ...) => (LoweredGetClosurePtr ...) (GetCallerPC ...) => (LoweredGetCallerPC ...) (GetCallerSP ...) => (LoweredGetCallerSP ...) @@ -901,6 +902,9 @@ ((SHRB|SARB)const x [0]) => x ((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x +// Multi-register shifts +(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits) + // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) // because the x86 instructions are defined to use all 5 bits of the shift even // for the small shifts. I don't think we'll ever generate a weird shift (e.g. 
@@ -1110,24 +1114,24 @@ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem) ((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem) -((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) -((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) -((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => - ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {sym} base val mem) -((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => - ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {sym} base val mem) +((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) +((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) +((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem) +((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem) // Fold constants into stores. (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) => - (MOVQstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) (MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => - (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) (MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => - (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) + (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) (MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => - (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) + (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) // Fold address offsets into constant stores. 
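The rules under the comment above fold an ADDQconst in the address into the store's ValAndOff, so a constant store to a field at a constant offset needs no separate address arithmetic. A plausible source shape (T and clearB are illustrative, not from this CL):

```go
package main

// t.b sits 8 bytes past t; storing constant 0 there can become one
// MOVQstoreconst whose ValAndOff carries both the value 0 and the +8
// offset.
type T struct{ a, b int64 }

//go:noinline
func clearB(t *T) { t.b = 0 }

func main() {
	t := T{a: 1, b: 2}
	clearB(&t)
	println(t.b) // 0
}
```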
(MOV(Q|L|W|B)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) => @@ -1165,18 +1169,18 @@ ((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) -((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) +((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) => - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) -((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) +((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) => - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) -((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) +((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) -((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) +((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => - ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) // fold LEAQs together (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => @@ -1867,32 +1871,32 @@ && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) + => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) + => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) && x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) - => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) + => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) && x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) - => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) + => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) && x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) - => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem) + => (MOVQstore [a.Off()] {s} p (MOVQconst 
[a.Val64()&0xffffffff | c.Val64()<<32]) mem) (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem)) && x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) - => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem) + => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem) (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) && config.useSSE && x.Uses == 1 @@ -1900,7 +1904,7 @@ && c.Val() == 0 && c2.Val() == 0 && clobber(x) - => (MOVOstorezero [c2.Off32()] {s} p mem) + => (MOVOstorezero [c2.Off()] {s} p mem) // Combine stores into larger (unaligned) stores. Little endian. (MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem)) @@ -1969,6 +1973,16 @@ && clobber(x) => (MOVQstore [i] {s} p0 w0 mem) +(MOVBstore [7] {s} p1 (SHRQconst [56] w) + x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w) + x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w) + x3:(MOVBstore [0] {s} p1 w mem)))) + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && clobber(x1, x2, x3) + => (MOVQstore {s} p1 w mem) + (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p @@ -2050,11 +2064,11 @@ ((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) ((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) -(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => - ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off] {sym} ptr x mem) +(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) (MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem) -(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => - ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off] {sym} ptr x mem) +(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem) // Merge ADDQconst and LEAQ into atomic loads. 
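The atomic-load rules following this comment apply the same constant-offset folding to MOV(Q|L|B)atomicload. An illustrative sketch (the type and function are not from this CL):

```go
package main

import "sync/atomic"

// &c.b is c plus a constant 8-byte offset; the rules below fold that
// ADDQconst into the atomic load's auxint instead of materializing the
// address in a register.
type counters struct{ a, b uint64 }

//go:noinline
func loadB(c *counters) uint64 {
	return atomic.LoadUint64(&c.b)
}

func main() {
	var c counters
	atomic.StoreUint64(&c.b, 7)
	println(loadB(&c)) // 7
}
```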
(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => @@ -2108,12 +2122,12 @@ (MOVWQZX (MOVBQZX x)) => (MOVBQZX x) (MOVBQZX (MOVBQZX x)) => (MOVBQZX x) -(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) => - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) -(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) => - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) +(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) => + ((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) +(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) => + ((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) // float <-> int register moves, with no conversion. // These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}. @@ -2173,23 +2187,27 @@ (CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c]) && l.Uses == 1 && clobber(l) => -@l.Block (CMP(Q|L)constload {sym} [makeValAndOff32(c,off)] ptr mem) +@l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem) (CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c]) && l.Uses == 1 && clobber(l) => -@l.Block (CMP(W|B)constload {sym} [makeValAndOff32(int32(c),off)] ptr mem) +@l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem) -(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,int64(off)) => (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem) -(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem) -(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem) -(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem) +(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) +(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) +(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) +(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) (TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2) && l == l2 && l.Uses == 2 - && validValAndOff(0, int64(off)) && clobber(l) => - @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff64(0, int64(off))] ptr mem) + @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem) + +// Convert ANDload to MOVload when we can do the AND in a containing TEST op. +// Only do when it's within the same block, so we don't have flags live across basic block boundaries. +// See issue 44228. 
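A plausible Go shape for the TEST rule that follows: an AND whose only use is a comparison against zero, with one operand coming from memory (the function here is illustrative):

```go
package main

// x & *p used only in a zero test: the AND had been merged into an
// ANDQload, leaving TESTQ a a; the rule below converts the load+AND
// back into a plain load so the TEST can take x as a register operand
// without flags crossing a block boundary.
//go:noinline
func anyCommonBits(x uint64, p *uint64) bool {
	return x&*p != 0
}

func main() {
	v := uint64(6)
	println(anyCommonBits(2, &v)) // true
}
```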
+(TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load [off] {sym} ptr mem) x) (MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))]) (MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index 043162e544e..67b3293903c 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore package main @@ -121,6 +122,7 @@ func init() { gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} gp21sb = regInfo{inputs: []regMask{gpspsbg, gpsp}, outputs: gponly} gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} + gp31shift = regInfo{inputs: []regMask{gp, gp, cx}, outputs: []regMask{gp}} gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}} gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} @@ -360,20 +362,6 @@ func init() { {name: "BTSLconst", argLength: 1, reg: gp11, asm: "BTSL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 32 {name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 64 - // direct bit operation on memory operand - {name: "BTCQmodify", argLength: 3, reg: gpstore, asm: "BTCQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 64-bit arg0+auxint+aux, arg2=mem - {name: "BTCLmodify", argLength: 3, reg: gpstore, asm: "BTCL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 32-bit arg0+auxint+aux, arg2=mem - {name: "BTSQmodify", argLength: 3, reg: gpstore, asm: "BTSQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 64-bit arg0+auxint+aux, arg2=mem - {name: "BTSLmodify", argLength: 3, reg: gpstore, asm: "BTSL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 32-bit arg0+auxint+aux, arg2=mem - {name: "BTRQmodify", argLength: 3, reg: gpstore, asm: "BTRQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit arg1 in 64-bit arg0+auxint+aux, arg2=mem - {name: "BTRLmodify", argLength: 3, reg: gpstore, asm: "BTRL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit arg1 in 32-bit arg0+auxint+aux, arg2=mem - {name: "BTCQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem - {name: "BTCLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem - {name: 
"BTSQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem - {name: "BTSLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem - {name: "BTRQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem - {name: "BTRLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem - {name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 @@ -407,6 +395,9 @@ func init() { {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed int16(arg0) >> auxint, shift amount 0-15 {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed int8(arg0) >> auxint, shift amount 0-7 + {name: "SHRDQ", argLength: 3, reg: gp31shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg2, shifting in bits from arg1 (==(arg1<<64+arg0)>>arg2, keeping low 64 bits), shift amount is mod 64 + {name: "SHLDQ", argLength: 3, reg: gp31shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 << arg2, shifting in bits from arg1 (==(arg0<<64+arg1)< (CMP(Q|L|W|B) (MOV(Q|L|W|B)load {sym} [off] ptr mem) x) -(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off32()] ptr mem) x) +(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off()] ptr mem) x) -(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()]) -(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()]) -(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()]) -(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()]) +(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()]) +(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()]) +(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()]) +(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()]) (CMP(Q|L|W|B)loadidx1 {sym} [off] ptr idx x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)loadidx1 {sym} [off] ptr idx mem) x) (CMPQloadidx8 {sym} [off] ptr idx x mem) => (CMPQ 
(MOVQloadidx8 {sym} [off] ptr idx mem) x) (CMPLloadidx4 {sym} [off] ptr idx x mem) => (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x) (CMPWloadidx2 {sym} [off] ptr idx x mem) => (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x) -(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off32()] ptr idx mem) x) -(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) x) -(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) x) -(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) x) +(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off()] ptr idx mem) x) +(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x) +(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x) +(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x) -(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()]) -(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()]) -(CMPWconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()]) -(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()]) +(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) +(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) +(CMPWconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()]) +(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()]) -(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()]) -(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()]) -(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()]) +(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) +(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) +(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()]) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index de0df363e49..bcacbafe3a5 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -56,6 +56,7 @@ (Com(32|16|8) ...) => (MVN ...) (Sqrt ...) => (SQRTD ...) +(Sqrt32 ...) => (SQRTF ...) (Abs ...) => (ABSD ...) 
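The ARM rules in the following hunks are conditioned on buildcfg.GOARM (previously objabi.GOARM); the Ctz rules show the whole decision end to end. A sketch of source that reaches them, assuming bits.TrailingZeros32 lowers to a generic Ctz32 op:

```go
package main

import "math/bits"

// bits.TrailingZeros32 becomes a Ctz32 op; which lowering below fires
// is decided at compile time by buildcfg.GOARM: <=6 uses the
// CLZ(x&-x - 1) trick, ==7 uses CLZ(RBIT x).
//go:noinline
func tz(x uint32) int {
	return bits.TrailingZeros32(x)
}

func main() { println(tz(8)) } // 3
```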
// TODO: optimize this for ARMv5 and ARMv6 @@ -65,17 +66,17 @@ // count trailing zero for ARMv5 and ARMv6 // 32 - CLZ(x&-x - 1) -(Ctz32 x) && objabi.GOARM<=6 => +(Ctz32 x) && buildcfg.GOARM<=6 => (RSBconst [32] (CLZ (SUBconst (AND x (RSBconst [0] x)) [1]))) -(Ctz16 x) && objabi.GOARM<=6 => +(Ctz16 x) && buildcfg.GOARM<=6 => (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x10000] x) (RSBconst [0] (ORconst [0x10000] x))) [1]))) -(Ctz8 x) && objabi.GOARM<=6 => +(Ctz8 x) && buildcfg.GOARM<=6 => (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x100] x) (RSBconst [0] (ORconst [0x100] x))) [1]))) // count trailing zero for ARMv7 -(Ctz32 x) && objabi.GOARM==7 => (CLZ (RBIT x)) -(Ctz16 x) && objabi.GOARM==7 => (CLZ (RBIT (ORconst [0x10000] x))) -(Ctz8 x) && objabi.GOARM==7 => (CLZ (RBIT (ORconst [0x100] x))) +(Ctz32 x) && buildcfg.GOARM==7 => (CLZ (RBIT x)) +(Ctz16 x) && buildcfg.GOARM==7 => (CLZ (RBIT (ORconst [0x10000] x))) +(Ctz8 x) && buildcfg.GOARM==7 => (CLZ (RBIT (ORconst [0x100] x))) // bit length (BitLen32 x) => (RSBconst [32] (CLZ x)) @@ -89,13 +90,13 @@ // t5 = x right rotate 8 bits -- (d, a, b, c ) // result = t4 ^ t5 -- (d, c, b, a ) // using shifted ops this can be done in 4 instructions. -(Bswap32 x) && objabi.GOARM==5 => +(Bswap32 x) && buildcfg.GOARM==5 => (XOR (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) (SRRconst x [8])) // byte swap for ARMv6 and above -(Bswap32 x) && objabi.GOARM>=6 => (REV x) +(Bswap32 x) && buildcfg.GOARM>=6 => (REV x) // boolean ops -- booleans are represented with 0=false, 1=true (AndB ...) => (AND ...) @@ -172,7 +173,7 @@ (Const(8|16|32) [val]) => (MOVWconst [int32(val)]) (Const(32|64)F [val]) => (MOV(F|D)const [float64(val)]) (ConstNil) => (MOVWconst [0]) -(ConstBool [b]) => (MOVWconst [b2i32(b)]) +(ConstBool [t]) => (MOVWconst [b2i32(t)]) // truncations // Because we ignore high parts of registers, truncates are just copies. @@ -546,6 +547,10 @@ // MOVWnop doesn't emit instruction, only for ensuring the type. (MOVWreg x) && x.Uses == 1 => (MOVWnop x) +// TODO: we should be able to get rid of MOVWnop altogether. +// But for now, this is enough to get rid of lots of them.
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c]) + // mul by constant (MUL x (MOVWconst [c])) && int32(c) == -1 => (RSBconst [0] x) (MUL _ (MOVWconst [0])) => (MOVWconst [0]) @@ -732,10 +737,10 @@ (SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x) (ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x) (BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x) -(ADDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x) -(SUBconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x) -(ANDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x) -(BICconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x) +(ADDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x) +(SUBconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x) +(ANDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x) +(BICconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x) (ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d]) (ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x) (ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x) @@ -1139,7 +1144,7 @@ // UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by // ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL. 
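As the comment above notes, on ARMv6 the byte swap has to be matched from its shifted-op expansion rather than the UBFX form. An illustrative source pattern for the REV16 rules below:

```go
package main

// x>>8 | x<<8 on a uint16 swaps its two bytes; the resulting
// ORshiftLL/SLLconst/SRLconst shape is matched below as a single REV16.
//go:noinline
func swap16(x uint16) uint16 {
	return x>>8 | x<<8
}

func main() { println(swap16(0x1234) == 0x3412) } // true
```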
((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x) -((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 => (REV16 x) +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [24] (SLLconst [16] x)) x) && buildcfg.GOARM>=6 => (REV16 x) // use indexed loads and stores (MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem) @@ -1209,25 +1214,25 @@ (BIC x x) => (MOVWconst [0]) (ADD (MUL x y) a) => (MULA x y a) -(SUB a (MUL x y)) && objabi.GOARM == 7 => (MULS x y a) -(RSB (MUL x y) a) && objabi.GOARM == 7 => (MULS x y a) +(SUB a (MUL x y)) && buildcfg.GOARM == 7 => (MULS x y a) +(RSB (MUL x y) a) && buildcfg.GOARM == 7 => (MULS x y a) -(NEGF (MULF x y)) && objabi.GOARM >= 6 => (NMULF x y) -(NEGD (MULD x y)) && objabi.GOARM >= 6 => (NMULD x y) -(MULF (NEGF x) y) && objabi.GOARM >= 6 => (NMULF x y) -(MULD (NEGD x) y) && objabi.GOARM >= 6 => (NMULD x y) +(NEGF (MULF x y)) && buildcfg.GOARM >= 6 => (NMULF x y) +(NEGD (MULD x y)) && buildcfg.GOARM >= 6 => (NMULD x y) +(MULF (NEGF x) y) && buildcfg.GOARM >= 6 => (NMULF x y) +(MULD (NEGD x) y) && buildcfg.GOARM >= 6 => (NMULD x y) (NMULF (NEGF x) y) => (MULF x y) (NMULD (NEGD x) y) => (MULD x y) // the result will overwrite the addend, since they are in the same register -(ADDF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAF a x y) -(ADDF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSF a x y) -(ADDD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAD a x y) -(ADDD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSD a x y) -(SUBF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSF a x y) -(SUBF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAF a x y) -(SUBD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSD a x y) -(SUBD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAD a x y) +(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y) +(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y) +(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y) +(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y) +(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y) +(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y) +(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y) +(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y) (AND x (MVN y)) => (BIC x y) @@ -1259,8 +1264,8 @@ (CMPD x (MOVDconst [0])) => (CMPD0 x) // bit extraction -(SRAconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x) -(SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x) +(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x) +(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x) // comparison simplification ((LT|LE|EQ|NE|GE|GT) (CMP x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMN x y)) // sense of carry bit not preserved diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index a0e2a0d5e27..3d2759493e5 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -60,6 +60,8 @@ 
(Trunc ...) => (FRINTZD ...) (FMA x y z) => (FMADDD z x y) +(Sqrt32 ...) => (FSQRTS ...) + // lowering rotates (RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) (RotateLeft16 x (MOVDconst [c])) => (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) @@ -196,7 +198,7 @@ (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) (Const(32F|64F) [val]) => (FMOV(S|D)const [float64(val)]) (ConstNil) => (MOVDconst [0]) -(ConstBool [b]) => (MOVDconst [b2i(b)]) +(ConstBool [t]) => (MOVDconst [b2i(t)]) (Slicemask x) => (SRAconst (NEG x) [63]) @@ -792,6 +794,15 @@ (MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem) (MOVHloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload [int32(c)<<1] ptr mem) +(FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx8 ptr idx mem) +(FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx4 ptr idx mem) +(FMOVDloadidx ptr (SLLconst [3] idx) mem) => (FMOVDloadidx8 ptr idx mem) +(FMOVSloadidx ptr (SLLconst [2] idx) mem) => (FMOVSloadidx4 ptr idx mem) +(FMOVDloadidx (SLLconst [3] idx) ptr mem) => (FMOVDloadidx8 ptr idx mem) +(FMOVSloadidx (SLLconst [2] idx) ptr mem) => (FMOVSloadidx4 ptr idx mem) +(FMOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (FMOVDload ptr [int32(c)<<3] mem) +(FMOVSloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (FMOVSload ptr [int32(c)<<2] mem) + (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) @@ -865,6 +876,15 @@ (MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem) (MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem) +(FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx8 ptr idx val mem) +(FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx4 ptr idx val mem) +(FMOVDstoreidx ptr (SLLconst [3] idx) val mem) => (FMOVDstoreidx8 ptr idx val mem) +(FMOVSstoreidx ptr (SLLconst [2] idx) val mem) => (FMOVSstoreidx4 ptr idx val mem) +(FMOVDstoreidx (SLLconst [3] idx) ptr val mem) => (FMOVDstoreidx8 ptr idx val mem) +(FMOVSstoreidx (SLLconst [2] idx) ptr val mem) => (FMOVSstoreidx4 ptr idx val mem) +(FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (FMOVDstore [int32(c)<<3] ptr val mem) +(FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (FMOVSstore [int32(c)<<2] ptr val mem) + (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => @@ -1127,6 +1147,10 @@ // MOVDnop doesn't emit instruction, only for ensuring the type. (MOVDreg x) && x.Uses == 1 => (MOVDnop x) +// TODO: we should be able to get rid of MOVDnop altogether.
+(MOVDnop (MOVDconst [c])) => (MOVDconst [c]) + // fold constant into arithmatic ops (ADD x (MOVDconst [c])) => (ADDconst [c] x) (SUB x (MOVDconst [c])) => (SUBconst [c] x) @@ -1335,8 +1359,18 @@ (XOR x (MVN y)) => (EON x y) (OR x (MVN y)) => (ORN x y) (MVN (XOR x y)) => (EON x y) + +(CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag) +(CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag) (CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag) (CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag) +(CSEL [cc] x (ADDconst [1] a) flag) => (CSINC [cc] x a flag) +(CSEL [cc] (ADDconst [1] a) x flag) => (CSINC [arm64Negate(cc)] x a flag) +(CSEL [cc] x (MVN a) flag) => (CSINV [cc] x a flag) +(CSEL [cc] (MVN a) x flag) => (CSINV [arm64Negate(cc)] x a flag) +(CSEL [cc] x (NEG a) flag) => (CSNEG [cc] x a flag) +(CSEL [cc] (NEG a) x flag) => (CSNEG [arm64Negate(cc)] x a flag) + (SUB x (SUB y z)) => (SUB (ADD x z) y) (SUB (SUB x y) z) => (SUB x (ADD y z)) @@ -1491,9 +1525,13 @@ (LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no) (GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no) -// absorb InvertFlags into CSEL(0) +// absorb InvertFlags into conditional instructions (CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp) (CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp) +(CSETM [cc] (InvertFlags cmp)) => (CSETM [arm64Invert(cc)] cmp) +(CSINC [cc] x y (InvertFlags cmp)) => (CSINC [arm64Invert(cc)] x y cmp) +(CSINV [cc] x y (InvertFlags cmp)) => (CSINV [arm64Invert(cc)] x y cmp) +(CSNEG [cc] x y (InvertFlags cmp)) => (CSNEG [arm64Invert(cc)] x y cmp) // absorb flag constants into boolean values (Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())]) @@ -1532,6 +1570,14 @@ (CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y (CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) > 0 => x (CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0]) +(CSNEG [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x +(CSNEG [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (NEG y) +(CSINV [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x +(CSINV [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (Not y) +(CSINC [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x +(CSINC [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (ADDconst [1] y) +(CSETM [cc] flag) && ccARM64Eval(cc, flag) > 0 => (MOVDconst [-1]) +(CSETM [cc] flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0]) // absorb flags back into boolean CSEL (CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil => @@ -1724,9 +1770,25 @@ (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) && cc == OpARM64LessThanU => (RORW x y) +// rev16w | rev16 // ((x>>8) | (x<<8)) => (REV16W x), the type of x is uint16, "|" can also be "^" or "+". ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 8)] x) x) => (REV16W x) +// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+". +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) + && uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff + => (REV16W x) + +// ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), "|" can also be "^" or "+". +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + && (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) + => (REV16 x) + +// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+". 
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + && (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) + => (REV16 (ANDconst [0xffffffff] x)) + // Extract from reg pair (ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x) ( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x) @@ -1747,6 +1809,16 @@ // Special case setting bit as 1. An example is math.Copysign(c,-1) (ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 => (ORconst [c1] x) +// If the shift amount is at least the data size (32, 16, 8), we can optimize to constant 0. +(MOVWUreg (SLLconst [lc] x)) && lc >= 32 => (MOVDconst [0]) +(MOVHUreg (SLLconst [lc] x)) && lc >= 16 => (MOVDconst [0]) +(MOVBUreg (SLLconst [lc] x)) && lc >= 8 => (MOVDconst [0]) + +// After zero extension, the upper (64-datasize(32|16|8)) bits are zero, so we can optimize to constant 0. +(SRLconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVDconst [0]) +(SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0]) +(SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0]) + // bitfield ops // sbfiz diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go index b0bc9c78ff6..18a5666b40f 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore package main @@ -236,8 +237,10 @@ func init() { {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32 {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64 {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64 + {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32 {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // byte reverse, 64-bit {name: "REVW", argLength: 1, reg: gp11, asm: "REVW"}, // byte reverse, 32-bit + {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // byte reverse in each 16-bit halfword, 64-bit {name: "REV16W", argLength: 1, reg: gp11, asm: "REV16W"}, // byte reverse in each 16-bit halfword, 32-bit {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // bit reverse, 64-bit {name: "RBITW", argLength: 1, reg: gp11, asm: "RBITW"}, // bit reverse, 32-bit @@ -264,17 +267,17 @@ func init() { // shifts {name: "SLL", argLength: 2, reg: gp21, asm: "LSL"}, // arg0 << arg1, shift amount is mod 64 - {name: "SLLconst", argLength: 1, reg: gp11, asm: "LSL", aux: "Int64"}, // arg0 << auxInt + {name: "SLLconst", argLength: 1, reg: gp11, asm: "LSL", aux: "Int64"}, // arg0 << auxInt, auxInt should be in the range 0 to 63. {name: "SRL", argLength: 2, reg: gp21, asm: "LSR"}, // arg0 >> arg1, unsigned, shift amount is mod 64 - {name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned + {name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned, auxInt should be in the range 0 to 63. {name: "SRA", argLength: 2, reg: gp21, asm: "ASR"}, // arg0 >> arg1, signed, shift amount is mod 64 - {name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed + {name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed, auxInt should be in the range 0 to 63.
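The explicit 0-to-63 shift ranges being documented on these ops are the invariant behind the MOVWUreg/MOVHUreg/MOVBUreg zero-folding rules added above. An illustrative function those rules reduce to a constant:

```go
package main

// uint32(x << 40): the low 32 bits of x<<40 are all zero, so the
// MOVWUreg (SLLconst [40] x) rule added above folds the whole
// expression to MOVDconst [0].
//go:noinline
func alwaysZero(x uint64) uint32 {
	return uint32(x << 40)
}

func main() { println(alwaysZero(123)) } // 0
```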
{name: "ROR", argLength: 2, reg: gp21, asm: "ROR"}, // arg0 right rotate by (arg1 mod 64) bits {name: "RORW", argLength: 2, reg: gp21, asm: "RORW"}, // arg0 right rotate by (arg1 mod 32) bits - {name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits - {name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits - {name: "EXTRconst", argLength: 2, reg: gp21, asm: "EXTR", aux: "Int64"}, // extract 64 bits from arg0:arg1 starting at lsb auxInt - {name: "EXTRWconst", argLength: 2, reg: gp21, asm: "EXTRW", aux: "Int64"}, // extract 32 bits from arg0[31:0]:arg1[31:0] starting at lsb auxInt and zero top 32 bits + {name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits, auxInt should be in the range 0 to 63. + {name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits, auxInt should be in the range 0 to 31. + {name: "EXTRconst", argLength: 2, reg: gp21, asm: "EXTR", aux: "Int64"}, // extract 64 bits from arg0:arg1 starting at lsb auxInt, auxInt should be in the range 0 to 63. + {name: "EXTRWconst", argLength: 2, reg: gp21, asm: "EXTRW", aux: "Int64"}, // extract 32 bits from arg0[31:0]:arg1[31:0] starting at lsb auxInt and zero top 32 bits, auxInt should be in the range 0 to 31. // comparisons {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 @@ -295,45 +298,45 @@ func init() { {name: "FCMPD0", argLength: 1, reg: fp1flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to 0, float64 // shifted ops - {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<>auxInt), unsigned shift - {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift - {name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<>auxInt), unsigned shift - {name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift - {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<>auxInt, unsigned shift - {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift - {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1<>auxInt, unsigned shift - {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, signed shift - {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1<>auxInt), unsigned shift - {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), signed shift - {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1<>auxInt, unsigned shift - {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, signed shift - {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1<>auxInt, unsigned shift - {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, signed shift - {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1<>auxInt), unsigned shift - {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), signed shift - {name: "EONshiftLL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 
^ ^(arg1<>auxInt), unsigned shift - {name: "EONshiftRA", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), signed shift - {name: "ORNshiftLL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1<>auxInt), unsigned shift - {name: "ORNshiftRA", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), signed shift - {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<>auxInt, unsigned shift - {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift - {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<>auxInt) compare to 0, unsigned shift - {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift - {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<>auxInt) compare to 0, unsigned shift - {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift + {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. 
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "EONshiftLL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "EONshiftRA", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "ORNshiftLL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "ORNshiftRA", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63. + {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63. + {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63. + {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63. // bitfield ops // for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff @@ -379,11 +382,13 @@ func init() { {name: "FMOVDloadidx", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1, arg2=mem. // shifted register indexed load - {name: "MOVHloadidx2", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit half-word from arg0 + arg1*2, sign-extended to 64-bit, arg2=mem. - {name: "MOVHUloadidx2", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit half-word from arg0 + arg1*2, zero-extended to 64-bit, arg2=mem. - {name: "MOVWloadidx4", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1*4, sign-extended to 64-bit, arg2=mem. - {name: "MOVWUloadidx4", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1*4, zero-extended to 64-bit, arg2=mem. - {name: "MOVDloadidx8", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit double-word from arg0 + arg1*8, arg2 = mem. + {name: "MOVHloadidx2", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit half-word from arg0 + arg1*2, sign-extended to 64-bit, arg2=mem. + {name: "MOVHUloadidx2", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit half-word from arg0 + arg1*2, zero-extended to 64-bit, arg2=mem. + {name: "MOVWloadidx4", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1*4, sign-extended to 64-bit, arg2=mem. 
+ {name: "MOVWUloadidx4", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1*4, zero-extended to 64-bit, arg2=mem. + {name: "MOVDloadidx8", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit double-word from arg0 + arg1*8, arg2 = mem. + {name: "FMOVSloadidx4", argLength: 3, reg: fp2load, asm: "FMOVS", typ: "Float32"}, // load 32-bit float from arg0 + arg1*4, arg2 = mem. + {name: "FMOVDloadidx8", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1*8, arg2 = mem. {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. @@ -402,9 +407,11 @@ func init() { {name: "FMOVDstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1, arg3=mem. // shifted register indexed store - {name: "MOVHstoreidx2", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1*2, arg3 = mem. - {name: "MOVWstoreidx4", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1*4, arg3 = mem. - {name: "MOVDstoreidx8", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1*8, arg3 = mem. + {name: "MOVHstoreidx2", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1*2, arg3 = mem. + {name: "MOVWstoreidx4", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1*4, arg3 = mem. + {name: "MOVDstoreidx8", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1*8, arg3 = mem. + {name: "FMOVSstoreidx4", argLength: 4, reg: fpstore2, asm: "FMOVS", typ: "Mem"}, // store 32-bit float of arg2 to arg0 + arg1*4, arg3=mem. + {name: "FMOVDstoreidx8", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1*8, arg3=mem. {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. @@ -467,8 +474,12 @@ func init() { // conditional instructions; auxint is // one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.) - {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : arg1 - {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0 + {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : arg1 + {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0 + {name: "CSINC", argLength: 3, reg: gp2flags1, asm: "CSINC", aux: "CCop"}, // auxint(flags) ? arg0 : arg1 + 1 + {name: "CSINV", argLength: 3, reg: gp2flags1, asm: "CSINV", aux: "CCop"}, // auxint(flags) ? arg0 : ^arg1 + {name: "CSNEG", argLength: 3, reg: gp2flags1, asm: "CSNEG", aux: "CCop"}, // auxint(flags) ? 
arg0 : -arg1 + {name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0 // function calls {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem @@ -502,13 +513,14 @@ func init() { // auxint = offset into duffzero code to start executing // returns mem // R20 changed as side effect + // R16 and R17 may be clobbered by linker trampoline. { name: "DUFFZERO", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{buildReg("R20")}, - clobbers: buildReg("R20 R30"), + clobbers: buildReg("R16 R17 R20 R30"), }, faultOnNilArg0: true, unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts @@ -542,13 +554,14 @@ func init() { // auxint = offset into duffcopy code to start executing // returns mem // R20, R21 changed as side effect + // R16 and R17 may be clobbered by linker trampoline. { name: "DUFFCOPY", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{buildReg("R21"), buildReg("R20")}, - clobbers: buildReg("R20 R21 R26 R30"), + clobbers: buildReg("R16 R17 R20 R21 R26 R30"), }, faultOnNilArg0: true, faultOnNilArg1: true, @@ -707,7 +720,8 @@ func init() { // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier // It saves all GP registers if necessary, // but clobbers R30 (LR) because it's a call. - {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R30")}, clobberFlags: true, aux: "Sym", symEffect: "None"}, + // R16 and R17 may be clobbered by linker trampoline. + {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R16 R17 R30")}, clobberFlags: true, aux: "Sym", symEffect: "None"}, // There are three of these functions so that they can have three different register inputs. // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go index 70c789937aa..d1f86039a36 100644 --- a/src/cmd/compile/internal/ssa/gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
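As an aside on the conditional-select ops added in the ARM64 hunk above, here is a minimal Go sketch of their semantics; `cond` stands for "the condition encoded in auxint holds for the flags value", and the helper names are illustrative, not compiler code:

```go
// Illustrative semantics of CSEL/CSINC/CSINV/CSNEG/CSETM.
func csel(cond bool, a, b int64) int64  { if cond { return a }; return b }      // auxint(flags) ? arg0 : arg1
func csinc(cond bool, a, b int64) int64 { if cond { return a }; return b + 1 }  // auxint(flags) ? arg0 : arg1 + 1
func csinv(cond bool, a, b int64) int64 { if cond { return a }; return ^b }     // auxint(flags) ? arg0 : ^arg1
func csneg(cond bool, a, b int64) int64 { if cond { return a }; return -b }     // auxint(flags) ? arg0 : -arg1
func csetm(cond bool) int64             { if cond { return -1 }; return 0 }     // auxint(flags) ? -1 : 0
```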
+//go:build ignore // +build ignore package main @@ -156,7 +157,7 @@ func init() { reg: regInfo{ inputs: []regMask{buildReg("R1"), buildReg("R0")}, outputs: []regMask{buildReg("R0"), buildReg("R1")}, - clobbers: buildReg("R2 R3 R14"), + clobbers: buildReg("R2 R3 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register }, clobberFlags: true, typ: "(UInt32,UInt32)", @@ -217,6 +218,7 @@ func init() { {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 + {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64 {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero @@ -458,7 +460,7 @@ func init() { argLength: 3, reg: regInfo{ inputs: []regMask{buildReg("R1"), buildReg("R0")}, - clobbers: buildReg("R1 R14"), + clobbers: buildReg("R1 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register }, faultOnNilArg0: true, }, @@ -475,7 +477,7 @@ func init() { argLength: 3, reg: regInfo{ inputs: []regMask{buildReg("R2"), buildReg("R1")}, - clobbers: buildReg("R0 R1 R2 R14"), + clobbers: buildReg("R0 R1 R2 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register }, faultOnNilArg0: true, faultOnNilArg1: true, @@ -563,8 +565,8 @@ func init() { // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier // It saves all GP registers if necessary, - // but clobbers R14 (LR) because it's a call. - {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R14")}, clobberFlags: true, aux: "Sym", symEffect: "None"}, + // but clobbers R14 (LR) because it's a call, and R12 which is linker trampoline scratch register. + {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R12 R14")}, clobberFlags: true, aux: "Sym", symEffect: "None"}, } blocks := []blockData{ diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules index 8ad2c90ac33..4ac9668ea9c 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules @@ -121,6 +121,7 @@ (Com(32|16|8) x) => (NORconst [0] x) (Sqrt ...) => (SQRTD ...) +(Sqrt32 ...) => (SQRTF ...) // TODO: optimize this case? (Ctz32NonZero ...) => (Ctz32 ...) @@ -143,7 +144,7 @@ (Const(32|16|8) [val]) => (MOVWconst [int32(val)]) (Const(32|64)F ...) => (MOV(F|D)const ...) (ConstNil) => (MOVWconst [0]) -(ConstBool [b]) => (MOVWconst [b2i32(b)]) +(ConstBool [t]) => (MOVWconst [b2i32(t)]) // truncations // Because we ignore high parts of registers, truncates are just copies. @@ -559,6 +560,10 @@ // MOVWnop doesn't emit instruction, only for ensuring the type. (MOVWreg x) && x.Uses == 1 => (MOVWnop x) +// TODO: we should be able to get rid of MOVWnop all together. +// But for now, this is enough to get rid of lots of them. 
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c])
+
 // fold constant into arithmetic ops
 (ADD x (MOVWconst [c])) => (ADDconst [c] x)
 (SUB x (MOVWconst [c])) => (SUBconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index 088c9b1ac44..fd04a6c3a85 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -121,6 +121,7 @@
 (Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)

 (Sqrt ...) => (SQRTD ...)
+(Sqrt32 ...) => (SQRTF ...)

 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB ...) => (AND ...)
@@ -133,7 +134,7 @@
 (Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
 (Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
 (ConstNil) => (MOVVconst [0])
-(ConstBool [b]) => (MOVVconst [int64(b2i(b))])
+(ConstBool [t]) => (MOVVconst [int64(b2i(t))])

 (Slicemask x) => (SRAVconst (NEGV x) [63])

@@ -558,6 +559,10 @@
 // MOVVnop doesn't emit instruction, only for ensuring the type.
 (MOVVreg x) && x.Uses == 1 => (MOVVnop x)

+// TODO: we should be able to get rid of MOVVnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVVnop (MOVVconst [c])) => (MOVVconst [c])
+
 // fold constant into arithmetic ops
 (ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
 (SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
@@ -676,3 +681,9 @@
 (GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
 (GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
 (GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
+
+// fold readonly sym load
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
+(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
index e1e39335026..77f251c0d3f 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
@@ -199,6 +199,7 @@ func init() {
 	{name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
 	{name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
 	{name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+	{name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32

 	// shifts
 	{name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64
diff --git a/src/cmd/compile/internal/ssa/gen/MIPSOps.go b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
index 75ab99ea26c..b92e8cb9f1e 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPSOps.go
+++ b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
@@ -182,6 +182,7 @@ func init() {
 	{name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
 	{name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
 	{name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+	{name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32

 	// shifts
 	{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 32
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index
a762be65d42..ce4b324b5e1 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -12,20 +12,20 @@ (Sub64F ...) => (FSUB ...) // Combine 64 bit integer multiply and adds -(ADD l:(MULLD x y) z) && objabi.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z) +(ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z) (Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y)) (Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) (Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y)) (Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) -(Mod64 x y) && objabi.GOPPC64 >=9 => (MODSD x y) -(Mod64 x y) && objabi.GOPPC64 <=8 => (SUB x (MULLD y (DIVD x y))) -(Mod64u x y) && objabi.GOPPC64 >= 9 => (MODUD x y) -(Mod64u x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y))) -(Mod32 x y) && objabi.GOPPC64 >= 9 => (MODSW x y) -(Mod32 x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y))) -(Mod32u x y) && objabi.GOPPC64 >= 9 => (MODUW x y) -(Mod32u x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y))) +(Mod64 x y) && buildcfg.GOPPC64 >=9 => (MODSD x y) +(Mod64 x y) && buildcfg.GOPPC64 <=8 => (SUB x (MULLD y (DIVD x y))) +(Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y) +(Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y))) +(Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y) +(Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y))) +(Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y) +(Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y))) // (x + y) / 2 with x>=y => (x - y) / 2 + y (Avg64u x y) => (ADD (SRDconst (SUB x y) [1]) y) @@ -71,6 +71,7 @@ (Round(32|64)F ...) => (LoweredRound(32|64)F ...) (Sqrt ...) => (FSQRT ...) +(Sqrt32 ...) => (FSQRTS ...) (Floor ...) => (FFLOOR ...) (Ceil ...) => (FCEIL ...) (Trunc ...) => (FTRUNC ...) @@ -100,7 +101,7 @@ (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) (Const(32|64)F ...) => (FMOV(S|D)const ...) (ConstNil) => (MOVDconst [0]) -(ConstBool [b]) => (MOVDconst [b2i(b)]) +(ConstBool [t]) => (MOVDconst [b2i(t)]) // Constant folding (FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)]) @@ -350,9 +351,9 @@ (Ctz32NonZero ...) => (Ctz32 ...) (Ctz64NonZero ...) => (Ctz64 ...) 
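The (Sqrt32 ...) => (FSQRTS ...) rule added above, like the SQRTF and F32Sqrt counterparts on the other architectures in this diff, serves a single source-level idiom: per the generic.rules change later in the diff, the widen/sqrt/narrow dance is now recognized and lowered to one single-precision instruction. A minimal example of the pattern:

```go
package main

import "math"

// Previously compiled as convert-to-float64, double-precision sqrt,
// convert back; with the new Sqrt32 op it lowers to one float32 sqrt.
func sqrt32(x float32) float32 {
	return float32(math.Sqrt(float64(x)))
}

func main() { println(sqrt32(2)) }
```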
-(Ctz64 x) && objabi.GOPPC64<=8 => (POPCNTD (ANDN (ADDconst [-1] x) x)) +(Ctz64 x) && buildcfg.GOPPC64<=8 => (POPCNTD (ANDN (ADDconst [-1] x) x)) (Ctz64 x) => (CNTTZD x) -(Ctz32 x) && objabi.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN (ADDconst [-1] x) x))) +(Ctz32 x) && buildcfg.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN (ADDconst [-1] x) x))) (Ctz32 x) => (CNTTZW (MOVWZreg x)) (Ctz16 x) => (POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x))) (Ctz8 x) => (POPCNTB (MOVBZreg (ANDN (ADDconst [-1] x) x))) @@ -606,24 +607,18 @@ (MOVHstorezero [4] destptr (MOVWstorezero destptr mem))) -// MOVD for store with DS must have offsets that are multiple of 4 -(Zero [8] {t} destptr mem) && t.Alignment()%4 == 0 => - (MOVDstorezero destptr mem) -(Zero [8] destptr mem) => - (MOVWstorezero [4] destptr - (MOVWstorezero [0] destptr mem)) -// Handle these cases only if aligned properly, otherwise use general case below -(Zero [12] {t} destptr mem) && t.Alignment()%4 == 0 => +(Zero [8] {t} destptr mem) => (MOVDstorezero destptr mem) +(Zero [12] {t} destptr mem) => (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem)) -(Zero [16] {t} destptr mem) && t.Alignment()%4 == 0 => +(Zero [16] {t} destptr mem) => (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)) -(Zero [24] {t} destptr mem) && t.Alignment()%4 == 0 => +(Zero [24] {t} destptr mem) => (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))) -(Zero [32] {t} destptr mem) && t.Alignment()%4 == 0 => +(Zero [32] {t} destptr mem) => (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr @@ -632,15 +627,12 @@ // Handle cases not handled above // Lowered Short cases do not generate loops, and as a result don't clobber // the address registers or flags. -(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem) -(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 => (LoweredZero [s] ptr mem) -(Zero [s] ptr mem) && s < 128 && objabi.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem) -(Zero [s] ptr mem) && objabi.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem) +(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem) +(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem) +(Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem) +(Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem) // moves -// Only the MOVD and MOVW instructions require 4 byte -// alignment in the offset field. The other MOVx instructions -// allow any alignment. (Move [0] _ _ mem) => mem (Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem) (Move [2] dst src mem) => @@ -648,11 +640,8 @@ (Move [4] dst src mem) => (MOVWstore dst (MOVWZload src mem) mem) // MOVD for load and store must have offsets that are multiple of 4 -(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => +(Move [8] {t} dst src mem) => (MOVDstore dst (MOVDload src mem) mem) -(Move [8] dst src mem) => - (MOVWstore [4] dst (MOVWZload [4] src mem) - (MOVWstore dst (MOVWZload src mem) mem)) (Move [3] dst src mem) => (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) @@ -669,11 +658,11 @@ // Large move uses a loop. Since the address is computed and the // offset is zero, any alignment can be used. 
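The pre-POWER9 Ctz lowerings above derive count-trailing-zeros from a population count. In Go terms (PPC64's ANDN is arg0 &^ arg1, so (ANDN (ADDconst [-1] x) x) is (x-1) &^ x; the helper name is illustrative):

```go
import "math/bits"

// (x-1) &^ x sets exactly the bits below x's lowest set bit
// (all 64 bits when x == 0), so counting the ones gives ctz.
func ctz64(x uint64) int {
	return bits.OnesCount64((x - 1) &^ x)
}
```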
-(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s) => +(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem) -(Move [s] dst src mem) && s > 8 && s <= 64 && objabi.GOPPC64 >= 9 => +(Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 => (LoweredQuadMoveShort [s] dst src mem) -(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s) => +(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) => (LoweredQuadMove [s] dst src mem) // Calls @@ -874,7 +863,7 @@ (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem) // Fold offsets for stores. -(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVDstore [off1+int32(off2)] {sym} x val mem) +(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} x val mem) (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} x val mem) (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} x val mem) (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} x val mem) @@ -897,7 +886,7 @@ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) - && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 => + && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) @@ -917,13 +906,13 @@ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) - && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 => + && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) - && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 => + && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => @@ -936,8 +925,8 @@ (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVSload [off1+int32(off2)] {sym} ptr mem) (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVDload [off1+int32(off2)] {sym} ptr mem) -(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVDload [off1+int32(off2)] {sym} x mem) -(MOVWload [off1] {sym} (ADDconst [off2] 
x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVWload [off1+int32(off2)] {sym} x mem) +(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} x mem) +(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} x mem) (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWZload [off1+int32(off2)] {sym} x mem) (MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} x mem) (MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHZload [off1+int32(off2)] {sym} x mem) @@ -946,7 +935,10 @@ // Determine load + addressing that can be done as a register indexed load (MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem) -// Determine indexed loads with constant values that can be done without index +// Determine if there is benefit to using a non-indexed load, since that saves the load +// of the index register. With MOVDload and MOVWload, there is no benefit if the offset +// value is not a multiple of 4, since that results in an extra instruction in the base +// register address computation. (MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem) (MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem) (MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem) @@ -959,7 +951,7 @@ (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) // Fold offsets for storezero -(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => +(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} x mem) (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} x mem) @@ -972,6 +964,7 @@ (MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem) // Stores with constant index values can be done without indexed instructions +// No need to lower the idx cases if c%4 is not 0 (MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem) (MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem) (MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem) @@ -979,7 +972,7 @@ // Fold symbols into storezero (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) - && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 => + && (x.Op != OpSB || p.Uses == 1) => (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) => @@ -1055,7 +1048,7 @@ (SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) (SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI 
[newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) // special case for power9 -(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && objabi.GOPPC64 >= 9 => (EXTSWSLconst [c] x) +(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x) // Lose widening ops fed to stores (MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem) @@ -1293,7 +1286,6 @@ o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem))))) && !config.BigEndian - && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 @@ -1430,7 +1422,6 @@ x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVWstore [i0] {s} p w mem))))) && !config.BigEndian - && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3) diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules index 4380a5efef7..9cdd62edbe0 100644 --- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules +++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules @@ -92,6 +92,7 @@ (Com8 ...) => (NOT ...) (Sqrt ...) => (FSQRTD ...) +(Sqrt32 ...) => (FSQRTS ...) // Sign and zero extension. @@ -221,9 +222,9 @@ (Rsh64x64 x y) => (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) // rotates -(RotateLeft8 x (MOVBconst [c])) => (Or8 (Lsh8x64 x (MOVBconst [c&7])) (Rsh8Ux64 x (MOVBconst [-c&7]))) -(RotateLeft16 x (MOVHconst [c])) => (Or16 (Lsh16x64 x (MOVHconst [c&15])) (Rsh16Ux64 x (MOVHconst [-c&15]))) -(RotateLeft32 x (MOVWconst [c])) => (Or32 (Lsh32x64 x (MOVWconst [c&31])) (Rsh32Ux64 x (MOVWconst [-c&31]))) +(RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) +(RotateLeft16 x (MOVDconst [c])) => (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) +(RotateLeft32 x (MOVDconst [c])) => (Or32 (Lsh32x64 x (MOVDconst [c&31])) (Rsh32Ux64 x (MOVDconst [-c&31]))) (RotateLeft64 x (MOVDconst [c])) => (Or64 (Lsh64x64 x (MOVDconst [c&63])) (Rsh64Ux64 x (MOVDconst [-c&63]))) (Less64 ...) => (SLT ...) @@ -251,7 +252,7 @@ (EqPtr x y) => (SEQZ (SUB x y)) (Eq64 x y) => (SEQZ (SUB x y)) -(Eq32 x y) => (SEQZ (SUBW x y)) +(Eq32 x y) => (SEQZ (SUB (ZeroExt32to64 x) (ZeroExt32to64 y))) (Eq16 x y) => (SEQZ (SUB (ZeroExt16to64 x) (ZeroExt16to64 y))) (Eq8 x y) => (SEQZ (SUB (ZeroExt8to64 x) (ZeroExt8to64 y))) (Eq64F ...) => (FEQD ...) @@ -259,7 +260,7 @@ (NeqPtr x y) => (SNEZ (SUB x y)) (Neq64 x y) => (SNEZ (SUB x y)) -(Neq32 x y) => (SNEZ (SUBW x y)) +(Neq32 x y) => (SNEZ (SUB (ZeroExt32to64 x) (ZeroExt32to64 y))) (Neq16 x y) => (SNEZ (SUB (ZeroExt16to64 x) (ZeroExt16to64 y))) (Neq8 x y) => (SNEZ (SUB (ZeroExt8to64 x) (ZeroExt8to64 y))) (Neq64F ...) => (FNED ...) 
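Because the dedicated 8/16/32-bit constant ops are deleted later in this diff, the riscv64 rotate rules above now match MOVDconst, but the decomposition itself is unchanged. A sketch of the 8-bit case (hypothetical helper, not library code):

```go
// Rotate left by c: shift left by c mod 8, OR with a logical shift
// right by (-c) mod 8, exactly as the rule's two shift operands.
func rotateLeft8(x uint8, c uint) uint8 {
	return x<<(c&7) | x>>(-c&7)
}
```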
@@ -353,45 +354,45 @@ // Small zeroing (Zero [0] _ mem) => mem -(Zero [1] ptr mem) => (MOVBstore ptr (MOVBconst [0]) mem) +(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem) (Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => - (MOVHstore ptr (MOVHconst [0]) mem) + (MOVHstore ptr (MOVDconst [0]) mem) (Zero [2] ptr mem) => - (MOVBstore [1] ptr (MOVBconst [0]) - (MOVBstore ptr (MOVBconst [0]) mem)) + (MOVBstore [1] ptr (MOVDconst [0]) + (MOVBstore ptr (MOVDconst [0]) mem)) (Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => - (MOVWstore ptr (MOVWconst [0]) mem) + (MOVWstore ptr (MOVDconst [0]) mem) (Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => - (MOVHstore [2] ptr (MOVHconst [0]) - (MOVHstore ptr (MOVHconst [0]) mem)) + (MOVHstore [2] ptr (MOVDconst [0]) + (MOVHstore ptr (MOVDconst [0]) mem)) (Zero [4] ptr mem) => - (MOVBstore [3] ptr (MOVBconst [0]) - (MOVBstore [2] ptr (MOVBconst [0]) - (MOVBstore [1] ptr (MOVBconst [0]) - (MOVBstore ptr (MOVBconst [0]) mem)))) + (MOVBstore [3] ptr (MOVDconst [0]) + (MOVBstore [2] ptr (MOVDconst [0]) + (MOVBstore [1] ptr (MOVDconst [0]) + (MOVBstore ptr (MOVDconst [0]) mem)))) (Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => (MOVDstore ptr (MOVDconst [0]) mem) (Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => - (MOVWstore [4] ptr (MOVWconst [0]) - (MOVWstore ptr (MOVWconst [0]) mem)) + (MOVWstore [4] ptr (MOVDconst [0]) + (MOVWstore ptr (MOVDconst [0]) mem)) (Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 => - (MOVHstore [6] ptr (MOVHconst [0]) - (MOVHstore [4] ptr (MOVHconst [0]) - (MOVHstore [2] ptr (MOVHconst [0]) - (MOVHstore ptr (MOVHconst [0]) mem)))) + (MOVHstore [6] ptr (MOVDconst [0]) + (MOVHstore [4] ptr (MOVDconst [0]) + (MOVHstore [2] ptr (MOVDconst [0]) + (MOVHstore ptr (MOVDconst [0]) mem)))) (Zero [3] ptr mem) => - (MOVBstore [2] ptr (MOVBconst [0]) - (MOVBstore [1] ptr (MOVBconst [0]) - (MOVBstore ptr (MOVBconst [0]) mem))) + (MOVBstore [2] ptr (MOVDconst [0]) + (MOVBstore [1] ptr (MOVDconst [0]) + (MOVBstore ptr (MOVDconst [0]) mem))) (Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => - (MOVHstore [4] ptr (MOVHconst [0]) - (MOVHstore [2] ptr (MOVHconst [0]) - (MOVHstore ptr (MOVHconst [0]) mem))) + (MOVHstore [4] ptr (MOVDconst [0]) + (MOVHstore [2] ptr (MOVDconst [0]) + (MOVHstore ptr (MOVDconst [0]) mem))) (Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => - (MOVWstore [8] ptr (MOVWconst [0]) - (MOVWstore [4] ptr (MOVWconst [0]) - (MOVWstore ptr (MOVWconst [0]) mem))) + (MOVWstore [8] ptr (MOVDconst [0]) + (MOVWstore [4] ptr (MOVDconst [0]) + (MOVWstore ptr (MOVDconst [0]) mem))) (Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 => (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) @@ -422,7 +423,7 @@ (Convert ...) => (MOVconvert ...) // Checks -(IsNonNil p) => (NeqPtr (MOVDconst [0]) p) +(IsNonNil ...) => (SNEZ ...) (IsInBounds ...) => (Less64U ...) (IsSliceInBounds ...) => (Leq64U ...) @@ -521,25 +522,14 @@ (OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr) (OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr) -// TODO(jsing): Check if we actually need MOV{B,H,W}const as most platforms -// use a single MOVDconst op. -(Const8 ...) => (MOVBconst ...) -(Const16 ...) => (MOVHconst ...) -(Const32 ...) => (MOVWconst ...) -(Const64 ...) => (MOVDconst ...) 
-(Const32F [val]) => (FMVSX (MOVWconst [int32(math.Float32bits(val))])) +(Const8 [val]) => (MOVDconst [int64(val)]) +(Const16 [val]) => (MOVDconst [int64(val)]) +(Const32 [val]) => (MOVDconst [int64(val)]) +(Const64 [val]) => (MOVDconst [int64(val)]) +(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))])) (Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))])) (ConstNil) => (MOVDconst [0]) -(ConstBool [val]) => (MOVBconst [int8(b2i(val))]) - -// Convert 64 bit immediate to two 32 bit immediates, combine with add and shift. -// The lower 32 bit immediate will be treated as signed, -// so if it is negative, adjust for the borrow by incrementing the top half. -// We don't have to worry about overflow from the increment, -// because if the top half is all 1s, and int32(c) is negative, -// then the overall constant fits in an int32. -(MOVDconst [c]) && !is32Bit(c) && int32(c) < 0 => (ADD (SLLI [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))])) -(MOVDconst [c]) && !is32Bit(c) && int32(c) >= 0 => (ADD (SLLI [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))])) +(ConstBool [val]) => (MOVDconst [int64(b2i(val))]) (Addr {sym} base) => (MOVaddr {sym} [0] base) (LocalAddr {sym} base _) => (MOVaddr {sym} base) @@ -563,12 +553,28 @@ (AtomicAdd32 ...) => (LoweredAtomicAdd32 ...) (AtomicAdd64 ...) => (LoweredAtomicAdd64 ...) +// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8))) +(AtomicAnd8 ptr val mem) => + (LoweredAtomicAnd32 (ANDI [^3] ptr) + (NOT (SLL (XORI [0xff] (ZeroExt8to32 val)) + (SLLI [3] (ANDI [3] ptr)))) mem) + +(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...) + (AtomicCompareAndSwap32 ...) => (LoweredAtomicCas32 ...) (AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...) (AtomicExchange32 ...) => (LoweredAtomicExchange32 ...) (AtomicExchange64 ...) => (LoweredAtomicExchange64 ...) +// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8)) +(AtomicOr8 ptr val mem) => + (LoweredAtomicOr32 (ANDI [^3] ptr) + (SLL (ZeroExt8to32 val) + (SLLI [3] (ANDI [3] ptr))) mem) + +(AtomicOr32 ...) => (LoweredAtomicOr32 ...) + // Conditional branches (If cond yes no) => (BNEZ cond yes no) @@ -595,24 +601,18 @@ (BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no) // Store zero -(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) -(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem) -(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem) (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem) // Avoid sign/zero extension for consts. 
-(MOVBreg (MOVBconst [c])) => (MOVDconst [int64(c)]) -(MOVHreg (MOVBconst [c])) => (MOVDconst [int64(c)]) -(MOVHreg (MOVHconst [c])) => (MOVDconst [int64(c)]) -(MOVWreg (MOVBconst [c])) => (MOVDconst [int64(c)]) -(MOVWreg (MOVHconst [c])) => (MOVDconst [int64(c)]) -(MOVWreg (MOVWconst [c])) => (MOVDconst [int64(c)]) -(MOVBUreg (MOVBconst [c])) => (MOVDconst [int64(uint8(c))]) -(MOVHUreg (MOVBconst [c])) => (MOVDconst [int64(uint16(c))]) -(MOVHUreg (MOVHconst [c])) => (MOVDconst [int64(uint16(c))]) -(MOVWUreg (MOVBconst [c])) => (MOVDconst [int64(uint32(c))]) -(MOVWUreg (MOVHconst [c])) => (MOVDconst [int64(uint32(c))]) -(MOVWUreg (MOVWconst [c])) => (MOVDconst [int64(uint32(c))]) +(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))]) +(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))]) +(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))]) +(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))]) +(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))]) +(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))]) // Avoid sign/zero extension after properly typed load. (MOVBreg x:(MOVBload _ _)) => (MOVDreg x) @@ -673,61 +673,29 @@ // MOVnop does not emit an instruction, only for ensuring the type. (MOVDreg x) && x.Uses == 1 => (MOVDnop x) +// TODO: we should be able to get rid of MOVDnop all together. +// But for now, this is enough to get rid of lots of them. +(MOVDnop (MOVDconst [c])) => (MOVDconst [c]) + // Fold constant into immediate instructions where possible. -(ADD (MOVBconst [val]) x) => (ADDI [int64(val)] x) -(ADD (MOVHconst [val]) x) => (ADDI [int64(val)] x) -(ADD (MOVWconst [val]) x) => (ADDI [int64(val)] x) (ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x) - -(AND (MOVBconst [val]) x) => (ANDI [int64(val)] x) -(AND (MOVHconst [val]) x) => (ANDI [int64(val)] x) -(AND (MOVWconst [val]) x) => (ANDI [int64(val)] x) (AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x) - -(OR (MOVBconst [val]) x) => (ORI [int64(val)] x) -(OR (MOVHconst [val]) x) => (ORI [int64(val)] x) -(OR (MOVWconst [val]) x) => (ORI [int64(val)] x) -(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x) - -(XOR (MOVBconst [val]) x) => (XORI [int64(val)] x) -(XOR (MOVHconst [val]) x) => (XORI [int64(val)] x) -(XOR (MOVWconst [val]) x) => (XORI [int64(val)] x) +(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x) (XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x) - -(SLL x (MOVBconst [val])) => (SLLI [int64(val&63)] x) -(SLL x (MOVHconst [val])) => (SLLI [int64(val&63)] x) -(SLL x (MOVWconst [val])) => (SLLI [int64(val&63)] x) (SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x) - -(SRL x (MOVBconst [val])) => (SRLI [int64(val&63)] x) -(SRL x (MOVHconst [val])) => (SRLI [int64(val&63)] x) -(SRL x (MOVWconst [val])) => (SRLI [int64(val&63)] x) (SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x) - -(SRA x (MOVBconst [val])) => (SRAI [int64(val&63)] x) -(SRA x (MOVHconst [val])) => (SRAI [int64(val&63)] x) -(SRA x (MOVWconst [val])) => (SRAI [int64(val&63)] x) (SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x) // Convert subtraction of a const into ADDI with negative immediate, where possible. -(SUB x (MOVBconst [val])) => (ADDI [-int64(val)] x) -(SUB x (MOVHconst [val])) => (ADDI [-int64(val)] x) -(SUB x (MOVWconst [val])) && is32Bit(-int64(val)) => (ADDI [-int64(val)] x) (SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x) // Subtraction of zero. 
-(SUB x (MOVBconst [0])) => x -(SUB x (MOVHconst [0])) => x -(SUB x (MOVWconst [0])) => x (SUB x (MOVDconst [0])) => x // Subtraction of zero with sign extension. -(SUBW x (MOVWconst [0])) => (ADDIW [0] x) +(SUBW x (MOVDconst [0])) => (ADDIW [0] x) // Subtraction from zero. -(SUB (MOVBconst [0]) x) => (NEG x) -(SUB (MOVHconst [0]) x) => (NEG x) -(SUB (MOVWconst [0]) x) => (NEG x) (SUB (MOVDconst [0]) x) => (NEG x) // Subtraction from zero with sign extension. diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go index f64319230b0..0ac9c5f62ad 100644 --- a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go @@ -126,6 +126,7 @@ func init() { gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}} gpxchg = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}, outputs: []regMask{gpMask}} gpcas = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}} + gpatomic = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}} fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}} fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}} @@ -167,9 +168,6 @@ func init() { {name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "RdWr"}, // arg0 + auxint + offset encoded in aux // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address - {name: "MOVBconst", reg: gp01, asm: "MOV", typ: "UInt8", aux: "Int8", rematerializeable: true}, // 8 low bits of auxint - {name: "MOVHconst", reg: gp01, asm: "MOV", typ: "UInt16", aux: "Int16", rematerializeable: true}, // 16 low bits of auxint - {name: "MOVWconst", reg: gp01, asm: "MOV", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint // Loads: load bits from arg0+auxint+aux and extend to 64 bits; arg1=mem @@ -335,7 +333,7 @@ func init() { {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true}, // Atomic stores. - // store arg1 to arg0. arg2=mem. returns memory. + // store arg1 to *arg0. arg2=mem. returns memory. {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, @@ -367,6 +365,11 @@ func init() { {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + // Atomic 32 bit AND/OR. + // *arg0 &= (|=) arg1. arg2=mem. returns nil. + {name: "LoweredAtomicAnd32", argLength: 3, reg: gpatomic, asm: "AMOANDW", faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicOr32", argLength: 3, reg: gpatomic, asm: "AMOORW", faultOnNilArg0: true, hasSideEffects: true}, + // Lowering pass-throughs {name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. 
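The AtomicAnd8 lowering in the riscv64 hunks above, together with the LoweredAtomicAnd32/AMOANDW op it targets, updates a byte through its containing aligned 32-bit word. A sketch of the mask arithmetic, with amoandw as a non-atomic stand-in for the AMOANDW instruction (illustrative only; the real op is atomic):

```go
import "unsafe"

// amoandw stands in for AMOANDW: *p &= v (atomically, in the real op).
func amoandw(p *uint32, v uint32) { *p &= v }

// ptr&^3 is the containing word, (ptr&3)*8 the byte's bit offset
// (riscv64 is little-endian), and ^((val^0xff)<<shift) places val in
// the target byte while keeping every other bit set to 1.
func atomicAnd8(ptr *uint8, val uint8) {
	addr := uintptr(unsafe.Pointer(ptr))
	word := (*uint32)(unsafe.Pointer(addr &^ 3))
	shift := (addr & 3) * 8
	amoandw(word, ^((uint32(val) ^ 0xff) << shift))
}
```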
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}}, // scheduler ensures only at beginning of entry block diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index c3421da0a24..88762f70458 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -142,6 +142,8 @@ (Round x) => (FIDBR [1] x) (FMA x y z) => (FMADD z x y) +(Sqrt32 ...) => (FSQRTS ...) + // Atomic loads and stores. // The SYNC instruction (fast-BCR-serialization) prevents store-load // reordering. Other sequences of memory operations (load-load, @@ -384,13 +386,13 @@ // MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes). (Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) => - (MVC [makeValAndOff32(int32(s), 0)] dst src mem) + (MVC [makeValAndOff(int32(s), 0)] dst src mem) (Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) => - (MVC [makeValAndOff32(int32(s)-256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem)) + (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)) (Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) => - (MVC [makeValAndOff32(int32(s)-512, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem))) + (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))) (Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) => - (MVC [makeValAndOff32(int32(s)-768, 768)] dst src (MVC [makeValAndOff32(256, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem)))) + (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))) // Move more than 1024 bytes using a loop. (Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) => @@ -403,20 +405,20 @@ (Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem) (Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem) (Zero [3] destptr mem) => - (MOVBstoreconst [makeValAndOff32(0,2)] destptr + (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem)) (Zero [5] destptr mem) => - (MOVBstoreconst [makeValAndOff32(0,4)] destptr + (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) (Zero [6] destptr mem) => - (MOVHstoreconst [makeValAndOff32(0,4)] destptr + (MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) (Zero [7] destptr mem) => - (MOVWstoreconst [makeValAndOff32(0,3)] destptr + (MOVWstoreconst [makeValAndOff(0,3)] destptr (MOVWstoreconst [0] destptr mem)) (Zero [s] destptr mem) && s > 0 && s <= 1024 => - (CLEAR [makeValAndOff32(int32(s), 0)] destptr mem) + (CLEAR [makeValAndOff(int32(s), 0)] destptr mem) // Zero more than 1024 bytes using a loop. (Zero [s] destptr mem) && s > 1024 => @@ -426,7 +428,7 @@ (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) (Const(32|64)F ...) => (FMOV(S|D)const ...) (ConstNil) => (MOVDconst [0]) -(ConstBool [b]) => (MOVDconst [b2i(b)]) +(ConstBool [t]) => (MOVDconst [b2i(t)]) // Lowering calls (StaticCall ...) => (CALLstatic ...) @@ -946,22 +948,22 @@ // Fold constants into stores. 
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB => - (MOVDstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB => - (MOVWstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB => - (MOVHstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) + (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB => - (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) + (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) // Fold address offsets into constant stores. -(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) => +(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) => (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem) -(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) => +(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) => (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) -(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) => +(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) => (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem) -(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off()+int64(off)) => +(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) => (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) // Merge address calculations into loads and stores. @@ -1304,19 +1306,19 @@ && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - => (MOVHstoreconst [makeValAndOff32(c.Val32()&0xff | a.Val32()<<8, a.Off32())] {s} p mem) + => (MOVHstoreconst [makeValAndOff(c.Val()&0xff | a.Val()<<8, a.Off())] {s} p mem) (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem)) && p.Op != OpSB && x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) - => (MOVWstore [a.Off32()] {s} p (MOVDconst [int64(c.Val32()&0xffff | a.Val32()<<16)]) mem) + => (MOVWstore [a.Off()] {s} p (MOVDconst [int64(c.Val()&0xffff | a.Val()<<16)]) mem) (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) && p.Op != OpSB && x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) - => (MOVDstore [a.Off32()] {s} p (MOVDconst [c.Val()&0xffffffff | a.Val()<<32]) mem) + => (MOVDstore [a.Off()] {s} p (MOVDconst [c.Val64()&0xffffffff | a.Val64()<<32]) mem) // Combine stores into larger (unaligned) stores. // It doesn't work on global data (based on SB) because stores with relative addressing @@ -1420,6 +1422,16 @@ && clobber(x) => (MOVDBRstore [i-4] {s} p w0 mem) +(MOVBstore [7] {s} p1 (SRDconst w) + x1:(MOVHBRstore [5] {s} p1 (SRDconst w) + x2:(MOVWBRstore [1] {s} p1 (SRDconst w) + x3:(MOVBstore [0] {s} p1 w mem)))) + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && clobber(x1, x2, x3) + => (MOVDBRstore {s} p1 w mem) + // Combining byte loads into larger (unaligned) loads. 
// Big-endian loads diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go index b24fd619422..5b33ba710e9 100644 --- a/src/cmd/compile/internal/ssa/gen/S390XOps.go +++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go @@ -381,7 +381,8 @@ func init() { {name: "NOT", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0 {name: "NOTW", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0 - {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) + {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) + {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32 // Conditional register-register moves. // The aux for these values is an s390x.CCMask value representing the condition code mask. diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules index fc45cd3ed5f..7ad3d1c72e1 100644 --- a/src/cmd/compile/internal/ssa/gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules @@ -55,9 +55,9 @@ (ZeroExt32to64 x:(I64Load32U _ _)) => x (ZeroExt16to(64|32) x:(I64Load16U _ _)) => x (ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x -(SignExt32to64 x) && objabi.GOWASM.SignExt => (I64Extend32S x) -(SignExt8to(64|32|16) x) && objabi.GOWASM.SignExt => (I64Extend8S x) -(SignExt16to(64|32) x) && objabi.GOWASM.SignExt => (I64Extend16S x) +(SignExt32to64 x) && buildcfg.GOWASM.SignExt => (I64Extend32S x) +(SignExt8to(64|32|16) x) && buildcfg.GOWASM.SignExt => (I64Extend8S x) +(SignExt16to(64|32) x) && buildcfg.GOWASM.SignExt => (I64Extend16S x) (SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32])) (SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) (SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) @@ -332,6 +332,8 @@ (Abs ...) => (F64Abs ...) (Copysign ...) => (F64Copysign ...) +(Sqrt32 ...) => (F32Sqrt ...) + (Ctz64 ...) => (I64Ctz ...) 
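For the Wasm sign-extension rules above: when buildcfg.GOWASM.SignExt is off, the fallback pair of shifts performs the extension. The 8-to-64-bit case in Go (illustrative helper):

```go
// Shift the byte to the top of the word, then arithmetic-shift back,
// replicating bit 7 across the upper 56 bits.
func signExt8to64(x int64) int64 { // the value lives in the low 8 bits of x
	return x << 56 >> 56
}
```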
(Ctz32 x) => (I64Ctz (I64Or x (I64Const [0x100000000]))) (Ctz16 x) => (I64Ctz (I64Or x (I64Const [0x10000]))) diff --git a/src/cmd/compile/internal/ssa/gen/WasmOps.go b/src/cmd/compile/internal/ssa/gen/WasmOps.go index 36c53bc78c2..c92878ca73b 100644 --- a/src/cmd/compile/internal/ssa/gen/WasmOps.go +++ b/src/cmd/compile/internal/ssa/gen/WasmOps.go @@ -238,13 +238,13 @@ func init() { {name: "I64Extend16S", asm: "I64Extend16S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 16 to 64 bit {name: "I64Extend32S", asm: "I64Extend32S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 32 to 64 bit - {name: "F32Sqrt", asm: "F32Sqrt", argLength: 1, reg: fp64_11, typ: "Float32"}, // sqrt(arg0) - {name: "F32Trunc", asm: "F32Trunc", argLength: 1, reg: fp64_11, typ: "Float32"}, // trunc(arg0) - {name: "F32Ceil", asm: "F32Ceil", argLength: 1, reg: fp64_11, typ: "Float32"}, // ceil(arg0) - {name: "F32Floor", asm: "F32Floor", argLength: 1, reg: fp64_11, typ: "Float32"}, // floor(arg0) - {name: "F32Nearest", asm: "F32Nearest", argLength: 1, reg: fp64_11, typ: "Float32"}, // round(arg0) - {name: "F32Abs", asm: "F32Abs", argLength: 1, reg: fp64_11, typ: "Float32"}, // abs(arg0) - {name: "F32Copysign", asm: "F32Copysign", argLength: 2, reg: fp64_21, typ: "Float32"}, // copysign(arg0, arg1) + {name: "F32Sqrt", asm: "F32Sqrt", argLength: 1, reg: fp32_11, typ: "Float32"}, // sqrt(arg0) + {name: "F32Trunc", asm: "F32Trunc", argLength: 1, reg: fp32_11, typ: "Float32"}, // trunc(arg0) + {name: "F32Ceil", asm: "F32Ceil", argLength: 1, reg: fp32_11, typ: "Float32"}, // ceil(arg0) + {name: "F32Floor", asm: "F32Floor", argLength: 1, reg: fp32_11, typ: "Float32"}, // floor(arg0) + {name: "F32Nearest", asm: "F32Nearest", argLength: 1, reg: fp32_11, typ: "Float32"}, // round(arg0) + {name: "F32Abs", asm: "F32Abs", argLength: 1, reg: fp32_11, typ: "Float32"}, // abs(arg0) + {name: "F32Copysign", asm: "F32Copysign", argLength: 2, reg: fp32_21, typ: "Float32"}, // copysign(arg0, arg1) {name: "F64Sqrt", asm: "F64Sqrt", argLength: 1, reg: fp64_11, typ: "Float64"}, // sqrt(arg0) {name: "F64Trunc", asm: "F64Trunc", argLength: 1, reg: fp64_11, typ: "Float64"}, // trunc(arg0) diff --git a/src/cmd/compile/internal/ssa/gen/dec.rules b/src/cmd/compile/internal/ssa/gen/dec.rules index 4c677f8418f..b19489870dd 100644 --- a/src/cmd/compile/internal/ssa/gen/dec.rules +++ b/src/cmd/compile/internal/ssa/gen/dec.rules @@ -56,6 +56,7 @@ (SlicePtr (SliceMake ptr _ _ )) => ptr (SliceLen (SliceMake _ len _)) => len (SliceCap (SliceMake _ _ cap)) => cap +(SlicePtrUnchecked (SliceMake ptr _ _ )) => ptr (Load ptr mem) && t.IsSlice() => (SliceMake diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index 1784923224d..aad7600d793 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -768,7 +768,7 @@ => mem // Collapse OffPtr -(OffPtr (OffPtr p [b]) [a]) => (OffPtr p [a+b]) +(OffPtr (OffPtr p [y]) [x]) => (OffPtr p [x+y]) (OffPtr p [0]) && v.Type.Compare(p.Type) == types.CMPeq => p // indexing operations @@ -847,7 +847,7 @@ f0 mem)))) // Putting struct{*byte} and similar into direct interfaces. 
-(IMake typ (StructMake1 val)) => (IMake typ val) +(IMake _typ (StructMake1 val)) => (IMake _typ val) (StructSelect [0] (IData x)) => (IData x) // un-SSAable values use mem->mem copies @@ -869,7 +869,7 @@ (Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem) // Putting [1]*byte and similar into direct interfaces. -(IMake typ (ArrayMake1 val)) => (IMake typ val) +(IMake _typ (ArrayMake1 val)) => (IMake _typ val) (ArraySelect [0] (IData x)) => (IData x) // string ops @@ -1968,43 +1968,15 @@ (Div32F x (Const32F [c])) && reciprocalExact32(c) => (Mul32F x (Const32F [1/c])) (Div64F x (Const64F [c])) && reciprocalExact64(c) => (Mul64F x (Const64F [1/c])) +// rewrite single-precision sqrt expression "float32(math.Sqrt(float64(x)))" +(Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x))) && sqrt0.Uses==1 => (Sqrt32 x) + (Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(c)) => (Const64F [math.Sqrt(c)]) -// recognize runtime.newobject and don't Zero/Nilcheck it -(Zero (Load (OffPtr [c] (SP)) mem) mem) - && mem.Op == OpStaticCall - && isSameCall(mem.Aux, "runtime.newobject") - && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value - => mem -(Store (Load (OffPtr [c] (SP)) mem) x mem) - && isConstZero(x) - && mem.Op == OpStaticCall - && isSameCall(mem.Aux, "runtime.newobject") - && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value - => mem -(Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem) - && isConstZero(x) - && mem.Op == OpStaticCall - && isSameCall(mem.Aux, "runtime.newobject") - && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value - => mem -// nil checks just need to rewrite to something useless. -// they will be deadcode eliminated soon afterwards. -(NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _) - && isSameCall(sym, "runtime.newobject") - && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value - && warnRule(fe.Debug_checknil(), v, "removed nil check") - => (Invalid) -(NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _) - && isSameCall(sym, "runtime.newobject") - && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value - && warnRule(fe.Debug_checknil(), v, "removed nil check") - => (Invalid) - // for rewriting results of some late-expanded rewrites (below) -(SelectN [0] (MakeResult a ___)) => a -(SelectN [1] (MakeResult a b ___)) => b -(SelectN [2] (MakeResult a b c ___)) => c +(SelectN [0] (MakeResult x ___)) => x +(SelectN [1] (MakeResult x y ___)) => y +(SelectN [2] (MakeResult x y z ___)) => z // for late-expanded calls, recognize newobject and remove zeroing and nilchecks (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) @@ -2021,12 +1993,12 @@ && isSameCall(call.Aux, "runtime.newobject") => mem -(NilCheck (SelectN [0] call:(StaticLECall _ _)) (SelectN [1] call)) +(NilCheck (SelectN [0] call:(StaticLECall _ _)) _) && isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") => (Invalid) -(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) (SelectN [1] call)) +(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _) && isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") => (Invalid) @@ -2041,18 +2013,18 @@ // Evaluate constant address comparisons. 
(EqPtr x x) => (ConstBool [true]) (NeqPtr x x) => (ConstBool [false]) -(EqPtr (Addr {a} _) (Addr {b} _)) => (ConstBool [a == b]) -(EqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) => (ConstBool [a == b && o == 0]) -(EqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) => (ConstBool [a == b && o1 == o2]) -(NeqPtr (Addr {a} _) (Addr {b} _)) => (ConstBool [a != b]) -(NeqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) => (ConstBool [a != b || o != 0]) -(NeqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) => (ConstBool [a != b || o1 != o2]) -(EqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) => (ConstBool [a == b]) -(EqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) => (ConstBool [a == b && o == 0]) -(EqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) => (ConstBool [a == b && o1 == o2]) -(NeqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) => (ConstBool [a != b]) -(NeqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) => (ConstBool [a != b || o != 0]) -(NeqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) => (ConstBool [a != b || o1 != o2]) +(EqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x == y]) +(EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x == y && o == 0]) +(EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x == y && o1 == o2]) +(NeqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x != y]) +(NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x != y || o != 0]) +(NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x != y || o1 != o2]) +(EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x == y]) +(EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x == y && o == 0]) +(EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x == y && o1 == o2]) +(NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x != y]) +(NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x != y || o != 0]) +(NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x != y || o1 != o2]) (EqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 == 0]) (NeqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 != 0]) (EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 == o2]) @@ -2085,18 +2057,22 @@ // Inline small or disjoint runtime.memmove calls with constant length. // See the comment in op Move in genericOps.go for discussion of the type. -(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) + +// Because expand calls runs after prove, constants useful to this pattern may not appear. +// Both versions need to exist; the memory and register variants. +// +// Match post-expansion calls, memory version. +(SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))) && sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() // avoids TUINTPTR, see issue 30061 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) - && clobber(s1, s2, s3) + && clobber(s1, s2, s3, call) => (Move {t.Elem()} [int64(sz)] dst src mem) -// Inline small or disjoint runtime.memmove calls with constant length. -// See the comment in op Move in genericOps.go for discussion of the type. 
-(SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem))
+// Match post-expansion calls, register version.
+(SelectN [0] call:(StaticCall {sym} dst src (Const(64|32) [sz]) mem))
 	&& sz >= 0
 	&& call.Uses == 1 // this will exclude all calls with results
 	&& isSameCall(sym, "runtime.memmove")
@@ -2105,12 +2081,15 @@
 	&& clobber(call)
 	=> (Move {dst.Type.Elem()} [int64(sz)] dst src mem)

-// De-virtualize interface calls into static calls.
-// Note that (ITab (IMake)) doesn't get
-// rewritten until after the first opt pass,
-// so this rule should trigger reliably.
-(InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, auxCall, itab, off) != nil =>
-	(StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem)
+// Match pre-expansion calls.
+(SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem))
+	&& sz >= 0
+	&& call.Uses == 1 // this will exclude all calls with results
+	&& isSameCall(sym, "runtime.memmove")
+	&& dst.Type.IsPtr() // avoids TUINTPTR, see issue 30061
+	&& isInlinableMemmove(dst, src, int64(sz), config)
+	&& clobber(call)
+	=> (Move {dst.Type.Elem()} [int64(sz)] dst src mem)

 // De-virtualize late-expanded interface calls into late-expanded static calls.
 // Note that (ITab (IMake)) doesn't get rewritten until after the first opt pass,
@@ -2499,8 +2478,8 @@
 		(Store {t5} (OffPtr [o5] dst) d4
 			(Zero {t1} [n] dst mem)))))

-// TODO this does not fire before call expansion; is that acceptable?
-(StaticCall {sym} x) && needRaceCleanup(sym, v) => x
+(SelectN [0] call:(StaticLECall {sym} a x)) && needRaceCleanup(sym, call) && clobber(call) => x
+(SelectN [0] call:(StaticLECall {sym} x)) && needRaceCleanup(sym, call) && clobber(call) => x

 // Collapse moving A -> B -> C into just A -> C.
 // Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 8cfda35c225..9f6664386c9 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

+//go:build ignore
 // +build ignore

 package main
@@ -257,13 +258,14 @@ var genericOps = []opData{
 	{name: "RotateLeft32", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
 	{name: "RotateLeft64", argLength: 2}, // Rotate bits in arg[0] left by arg[1]

-	// Square root, float64 only.
+	// Square root.
 	// Special cases:
 	//   +∞  → +∞
 	//   ±0  → ±0 (sign preserved)
 	//   x<0 → NaN
 	//   NaN → NaN
-	{name: "Sqrt", argLength: 1}, // √arg0
+	{name: "Sqrt", argLength: 1},   // √arg0 (floating point, double precision)
+	{name: "Sqrt32", argLength: 1}, // √arg0 (floating point, single precision)

 	// Round to integer, float64 only.
 	// Special cases:
@@ -332,6 +334,11 @@ var genericOps = []opData{
 	{name: "InitMem", zeroWidth: true}, // memory input to the function.
 	{name: "Arg", aux: "SymOff", symEffect: "Read", zeroWidth: true}, // argument to the function. aux=GCNode of arg, off = offset in that arg.

+	// Like Arg, these are generic ops that survive lowering. AuxInt is the argument's index in the
+	// architecture's parameter-register sequence, not a machine register number; the actual register
+	// used for each index is defined by the architecture. The ABI-specified spill location is
+	// obtained from the function.
+	{name: "ArgIntReg", aux: "NameOffsetInt8", zeroWidth: true}, // argument to the function in an int reg.
+ {name: "ArgFloatReg", aux: "NameOffsetInt8", zeroWidth: true}, // argument to the function in a float reg. + // The address of a variable. arg0 is the base pointer. // If the variable is a global, the base pointer will be SB and // the Aux field will be a *obj.LSym. @@ -389,9 +396,28 @@ var genericOps = []opData{ // TODO(josharian): ClosureCall and InterCall should have Int32 aux // to match StaticCall's 32 bit arg size limit. // TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff? - {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. - {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. - {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. + + // Before lowering, LECalls receive their fixed inputs (first), memory (last), + // and a variable number of input values in the middle. + // They produce a variable number of result values. + // These values are not necessarily "SSA-able"; they can be too large, + // but in that case inputs are loaded immediately before with OpDereference, + // and outputs are stored immediately with OpStore. + // + // After call expansion, Calls have the same fixed-middle-memory arrangement of inputs, + // with the difference that the "middle" is only the register-resident inputs, + // and the non-register inputs are instead stored at ABI-defined offsets from SP + // (and the stores thread through the memory that is ultimately an input to the call). + // Outputs follow a similar pattern; register-resident outputs are the leading elements + // of a Result-typed output, with memory last, and any memory-resident outputs have been + // stored to ABI-defined locations. Each non-memory input or output fits in a register. + // + // Subsequent architecture-specific lowering only changes the opcode. + + {name: "ClosureCall", argLength: -1, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory. + {name: "StaticCall", argLength: -1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory. + {name: "InterCall", argLength: -1, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1..argN-1 are register inputs, argN=memory, auxint=arg size. Returns Result of register results, plus memory. + {name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. {name: "InterLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded interface call. arg0=code pointer, arg1..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. 
@@ -453,6 +479,10 @@ var genericOps = []opData{ {name: "SlicePtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0) {name: "SliceLen", argLength: 1}, // len(arg0) {name: "SliceCap", argLength: 1}, // cap(arg0) + // SlicePtrUnchecked, like SlicePtr, extracts the pointer from a slice. + // SlicePtr values are assumed non-nil, because they are guarded by bounds checks. + // SlicePtrUnchecked values can be nil. + {name: "SlicePtrUnchecked", argLength: 1}, // Complex (part/whole) {name: "ComplexMake", argLength: 2}, // arg0=real, arg1=imag @@ -587,6 +617,7 @@ var genericOps = []opData{ // Clobber experiment op {name: "Clobber", argLength: 0, typ: "Void", aux: "SymOff", symEffect: "None"}, // write an invalid pointer value to the given pointer slot of a stack variable + {name: "ClobberReg", argLength: 0, typ: "Void"}, // clobber a register } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go index dfa146a28ae..8e5997b25a4 100644 --- a/src/cmd/compile/internal/ssa/gen/main.go +++ b/src/cmd/compile/internal/ssa/gen/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore // The gen command generates Go code (in the parent directory) for all @@ -30,21 +31,23 @@ import ( // apart from type names, and avoid awkward func parameters like "arch arch". type arch struct { - name string - pkg string // obj package to import for this arch. - genfile string // source file containing opcode code generation. - ops []opData - blocks []blockData - regnames []string - gpregmask regMask - fpregmask regMask - fp32regmask regMask - fp64regmask regMask - specialregmask regMask - framepointerreg int8 - linkreg int8 - generic bool - imports []string + name string + pkg string // obj package to import for this arch. + genfile string // source file containing opcode code generation. + ops []opData + blocks []blockData + regnames []string + ParamIntRegNames string + ParamFloatRegNames string + gpregmask regMask + fpregmask regMask + fp32regmask regMask + fp64regmask regMask + specialregmask regMask + framepointerreg int8 + linkreg int8 + generic bool + imports []string } type opData struct { @@ -63,7 +66,6 @@ type opData struct { nilCheck bool // this op is a nil check on arg0 faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset) faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset) - usesScratch bool // this op requires scratch memory space hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182. zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width. unsafePoint bool // this op is an unsafe point, i.e. 
not safe for async preemption @@ -320,9 +322,6 @@ func genOp() { log.Fatalf("faultOnNilArg1 with aux %s not allowed", v.aux) } } - if v.usesScratch { - fmt.Fprintln(w, "usesScratch: true,") - } if v.hasSideEffects { fmt.Fprintln(w, "hasSideEffects: true,") } @@ -404,12 +403,11 @@ func genOp() { // generate op string method fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }") - fmt.Fprintln(w, "func (o Op) UsesScratch() bool { return opcodeTable[o].usesScratch }") - fmt.Fprintln(w, "func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }") fmt.Fprintln(w, "func (o Op) IsCall() bool { return opcodeTable[o].call }") fmt.Fprintln(w, "func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }") fmt.Fprintln(w, "func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }") + fmt.Fprintln(w, "func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 }") // generate registers for _, a := range archs { @@ -418,7 +416,9 @@ func genOp() { } fmt.Fprintf(w, "var registers%s = [...]Register {\n", a.name) var gcRegN int + num := map[string]int8{} for i, r := range a.regnames { + num[r] = int8(i) pkg := a.pkg[len("cmd/internal/obj/"):] var objname string // name in cmd/internal/obj/$ARCH switch r { @@ -441,11 +441,38 @@ func genOp() { } fmt.Fprintf(w, " {%d, %s, %d, \"%s\"},\n", i, objname, gcRegIdx, r) } + parameterRegisterList := func(paramNamesString string) []int8 { + paramNamesString = strings.TrimSpace(paramNamesString) + if paramNamesString == "" { + return nil + } + paramNames := strings.Split(paramNamesString, " ") + var paramRegs []int8 + for _, regName := range paramNames { + if regName == "" { + // forgive extra spaces + continue + } + if regNum, ok := num[regName]; ok { + paramRegs = append(paramRegs, regNum) + delete(num, regName) + } else { + log.Fatalf("parameter register %s for architecture %s not a register name (or repeated in parameter list)", regName, a.name) + } + } + return paramRegs + } + + paramIntRegs := parameterRegisterList(a.ParamIntRegNames) + paramFloatRegs := parameterRegisterList(a.ParamFloatRegNames) + if gcRegN > 32 { // Won't fit in a uint32 mask. log.Fatalf("too many GC registers (%d > 32) on %s", gcRegN, a.name) } fmt.Fprintln(w, "}") + fmt.Fprintf(w, "var paramIntReg%s = %#v\n", a.name, paramIntRegs) + fmt.Fprintf(w, "var paramFloatReg%s = %#v\n", a.name, paramFloatRegs) fmt.Fprintf(w, "var gpRegMask%s = regMask(%d)\n", a.name, a.gpregmask) fmt.Fprintf(w, "var fpRegMask%s = regMask(%d)\n", a.name, a.fpregmask) if a.fp32regmask != 0 { diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 6388aab3621..fe8db4ed1f2 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gen // +build gen // This program generates Go code that applies rewrite rules to a Value. 
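The parameterRegisterList helper added to gen/main.go above turns an architecture's space-separated ParamIntRegNames/ParamFloatRegNames strings into dense register indices. Below is a self-contained sketch of the same parsing logic; the register table in main is invented for the demo.

	package main

	import (
		"fmt"
		"log"
		"strings"
	)

	// parameterRegisterList maps a space-separated list of register names to
	// dense register indices, forgiving extra spaces and rejecting names that
	// are unknown or repeated (deleting each name after use makes a repeat
	// fail the lookup, mirroring the generator's behavior).
	func parameterRegisterList(s string, num map[string]int8) []int8 {
		s = strings.TrimSpace(s)
		if s == "" {
			return nil
		}
		var regs []int8
		for _, name := range strings.Split(s, " ") {
			if name == "" {
				continue // forgive extra spaces
			}
			n, ok := num[name]
			if !ok {
				log.Fatalf("%s is not a register name (or is repeated)", name)
			}
			regs = append(regs, n)
			delete(num, name) // each name may appear only once
		}
		return regs
	}

	func main() {
		num := map[string]int8{"AX": 0, "BX": 1, "CX": 2, "DX": 3}
		fmt.Println(parameterRegisterList("AX  BX CX", num)) // [0 1 2]
	}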
@@ -194,7 +195,9 @@ func genRulesSuffix(arch arch, suff string) { swc.add(stmtf("return rewriteValue%s%s_%s(v)", arch.name, suff, op)) sw.add(swc) } - fn.add(sw) + if len(sw.List) > 0 { // skip if empty + fn.add(sw) + } fn.add(stmtf("return false")) genFile.add(fn) @@ -215,10 +218,10 @@ func genRulesSuffix(arch arch, suff string) { Suffix: fmt.Sprintf("_%s", op), ArgLen: opByName(arch, op).argLength, } - fn.add(declf("b", "v.Block")) - fn.add(declf("config", "b.Func.Config")) - fn.add(declf("fe", "b.Func.fe")) - fn.add(declf("typ", "&b.Func.Config.Types")) + fn.add(declReserved("b", "v.Block")) + fn.add(declReserved("config", "b.Func.Config")) + fn.add(declReserved("fe", "b.Func.fe")) + fn.add(declReserved("typ", "&b.Func.Config.Types")) for _, rule := range rules { if rr != nil && !rr.CanFail { log.Fatalf("unconditional rule %s is followed by other rules", rr.Match) @@ -247,8 +250,8 @@ func genRulesSuffix(arch arch, suff string) { // Generate block rewrite function. There are only a few block types // so we can make this one function with a switch. fn = &Func{Kind: "Block"} - fn.add(declf("config", "b.Func.Config")) - fn.add(declf("typ", "&b.Func.Config.Types")) + fn.add(declReserved("config", "b.Func.Config")) + fn.add(declReserved("typ", "&b.Func.Config.Types")) sw = &Switch{Expr: exprf("b.Kind")} ops = ops[:0] @@ -264,7 +267,9 @@ func genRulesSuffix(arch arch, suff string) { } sw.add(swc) } - fn.add(sw) + if len(sw.List) > 0 { // skip if empty + fn.add(sw) + } fn.add(stmtf("return false")) genFile.add(fn) @@ -579,9 +584,9 @@ func fprint(w io.Writer, n Node) { fmt.Fprintf(w, "\npackage ssa\n") for _, path := range append([]string{ "fmt", + "internal/buildcfg", "math", "cmd/internal/obj", - "cmd/internal/objabi", "cmd/compile/internal/base", "cmd/compile/internal/types", }, n.Arch.imports...) { @@ -823,12 +828,36 @@ func stmtf(format string, a ...interface{}) Statement { return file.Decls[0].(*ast.FuncDecl).Body.List[0] } -// declf constructs a simple "name := value" declaration, using exprf for its -// value. -func declf(name, format string, a ...interface{}) *Declare { +var reservedNames = map[string]bool{ + "v": true, // Values[i], etc + "b": true, // v.Block + "config": true, // b.Func.Config + "fe": true, // b.Func.fe + "typ": true, // &b.Func.Config.Types +} + +// declf constructs a simple "name := value" declaration, +// using exprf for its value. +// +// name must not be one of reservedNames. +// This helps prevent unintended shadowing and name clashes. +// To declare a reserved name, use declReserved. +func declf(loc, name, format string, a ...interface{}) *Declare { + if reservedNames[name] { + log.Fatalf("rule %s uses the reserved name %s", loc, name) + } return &Declare{name, exprf(format, a...)} } +// declReserved is like declf, but the name must be one of reservedNames. +// Calls to declReserved should generally be static and top-level. +func declReserved(name, value string) *Declare { + if !reservedNames[name] { + panic(fmt.Sprintf("declReserved call does not use a reserved name: %q", name)) + } + return &Declare{name, exprf(value)} +} + // breakf constructs a simple "if cond { break }" statement, using exprf for its // condition. 
func breakf(format string, a ...interface{}) *CondBreak { @@ -853,7 +882,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite { if vname == "" { vname = fmt.Sprintf("v_%v", i) } - rr.add(declf(vname, cname)) + rr.add(declf(rr.Loc, vname, cname)) p, op := genMatch0(rr, arch, expr, vname, nil, false) // TODO: pass non-nil cnt? if op != "" { check := fmt.Sprintf("%s.Op == %s", cname, op) @@ -868,7 +897,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite { } pos[i] = p } else { - rr.add(declf(arg, cname)) + rr.add(declf(rr.Loc, arg, cname)) pos[i] = arg + ".Pos" } } @@ -888,7 +917,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite { if !token.IsIdentifier(e.name) || rr.declared(e.name) { rr.add(breakf("%sTo%s(b.%s) != %s", unTitle(e.field), title(e.dclType), e.field, e.name)) } else { - rr.add(declf(e.name, "%sTo%s(b.%s)", unTitle(e.field), title(e.dclType), e.field)) + rr.add(declf(rr.Loc, e.name, "%sTo%s(b.%s)", unTitle(e.field), title(e.dclType), e.field)) } } if rr.Cond != "" { @@ -1038,11 +1067,11 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, } else { switch e.field { case "Aux": - rr.add(declf(e.name, "auxTo%s(%s.%s)", title(e.dclType), v, e.field)) + rr.add(declf(rr.Loc, e.name, "auxTo%s(%s.%s)", title(e.dclType), v, e.field)) case "AuxInt": - rr.add(declf(e.name, "auxIntTo%s(%s.%s)", title(e.dclType), v, e.field)) + rr.add(declf(rr.Loc, e.name, "auxIntTo%s(%s.%s)", title(e.dclType), v, e.field)) case "Type": - rr.add(declf(e.name, "%s.%s", v, e.field)) + rr.add(declf(rr.Loc, e.name, "%s.%s", v, e.field)) } } } @@ -1072,7 +1101,7 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, continue } if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) { - rr.add(declf(a, "%s.Args[%d]", v, n)) + rr.add(declf(rr.Loc, a, "%s.Args[%d]", v, n)) // delete the last argument so it is not reprocessed args = args[:n] } else { @@ -1084,7 +1113,7 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, if commutative && !pregenTop { for i := 0; i <= 1; i++ { vname := fmt.Sprintf("%s_%d", v, i) - rr.add(declf(vname, "%s.Args[%d]", v, i)) + rr.add(declf(rr.Loc, vname, "%s.Args[%d]", v, i)) } } if commutative { @@ -1111,7 +1140,7 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, rr.add(breakf("%s != %s", arg, rhs)) } else { if arg != rhs { - rr.add(declf(arg, "%s", rhs)) + rr.add(declf(rr.Loc, arg, "%s", rhs)) } } continue @@ -1126,7 +1155,7 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, } if argname != rhs { - rr.add(declf(argname, "%s", rhs)) + rr.add(declf(rr.Loc, argname, "%s", rhs)) } bexpr := exprf("%s.Op != addLater", argname) rr.add(&CondBreak{Cond: bexpr}) @@ -1203,7 +1232,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s v = resname } rr.Alloc++ - rr.add(declf(v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ)) + rr.add(declf(rr.Loc, v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ)) if move && top { // Rewrite original into a copy rr.add(stmtf("v.copyOf(%s)", v)) diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go index c06b5808e1c..4d191199fba 100644 --- a/src/cmd/compile/internal/ssa/html.go +++ b/src/cmd/compile/internal/ssa/html.go @@ -1064,7 +1064,7 @@ func (f *Func) HTML(phase string, dot *dotWriter) string { p := htmlFuncPrinter{w: buf} 
 	fprintFunc(p, f)

-	// fprintFunc(&buf, f) // TODO: HTML, not text, <br> for line breaks, etc.
+	// fprintFunc(&buf, f) // TODO: HTML, not text, <br /> for line breaks, etc.
 	fmt.Fprint(buf, "</code>")
 	return buf.String()
 }
diff --git a/src/cmd/compile/internal/ssa/layout.go b/src/cmd/compile/internal/ssa/layout.go
index 30b7b97d040..6abdb0d0c92 100644
--- a/src/cmd/compile/internal/ssa/layout.go
+++ b/src/cmd/compile/internal/ssa/layout.go
@@ -12,26 +12,10 @@ func layout(f *Func) {
 }

 // Register allocation may use a different order which has constraints
-// imposed by the linear-scan algorithm. Note that f.pass here is
-// regalloc, so the switch is conditional on -d=ssa/regalloc/test=N
+// imposed by the linear-scan algorithm.
 func layoutRegallocOrder(f *Func) []*Block {
-
-	switch f.pass.test {
-	case 0: // layout order
-		return layoutOrder(f)
-	case 1: // existing block order
-		return f.Blocks
-	case 2: // reverse of postorder; legal, but usually not good.
-		po := f.postorder()
-		visitOrder := make([]*Block, len(po))
-		for i, b := range po {
-			j := len(po) - i - 1
-			visitOrder[j] = b
-		}
-		return visitOrder
-	}
-
-	return nil
+	// remnant of an experiment; perhaps there will be another.
+	return layoutOrder(f)
 }

 func layoutOrder(f *Func) []*Block {
@@ -41,8 +25,13 @@ func layoutOrder(f *Func) []*Block {
 	indegree := make([]int, f.NumBlocks())
 	posdegree := f.newSparseSet(f.NumBlocks()) // blocks with positive remaining degree
 	defer f.retSparseSet(posdegree)
-	zerodegree := f.newSparseSet(f.NumBlocks()) // blocks with zero remaining degree
-	defer f.retSparseSet(zerodegree)
+	// blocks with zero remaining degree. Use a slice to simulate a LIFO queue, which
+	// implements the depth-first topological sort.
+	var zerodegree []ID
+	// LIFO queue. Track the successor blocks of the scheduled block so that when we
+	// encounter loops, we choose to schedule the successor block of the most recently
+	// scheduled block.
+	var succs []ID
 	exit := f.newSparseSet(f.NumBlocks()) // exit blocks
 	defer f.retSparseSet(exit)
@@ -88,7 +77,8 @@
 		}
 		indegree[b.ID] = len(b.Preds)
 		if len(b.Preds) == 0 {
-			zerodegree.add(b.ID)
+			// Push an element to the tail of the queue.
+			zerodegree = append(zerodegree, b.ID)
 		} else {
 			posdegree.add(b.ID)
 		}
@@ -105,12 +95,24 @@ blockloop:
 			break
 		}

-		for _, e := range b.Succs {
-			c := e.b
+		// The order in which we traverse b.Succs affects the direction in which the
+		// topological sort advances in depth. Take the following CFG as an example,
+		// ignoring other factors:
+		//	   b1
+		//	0/    \1
+		//	b2    b3
+		// Traversing b.Succs in order schedules the right child b3 immediately after b1;
+		// traversing in reverse order schedules the left child b2 immediately after b1.
+		// Test results show that reverse traversal performs a little better.
+		// Note: both layout and register allocation must be considered when measuring
+		// performance.
+		for i := len(b.Succs) - 1; i >= 0; i-- {
+			c := b.Succs[i].b
 			indegree[c.ID]--
 			if indegree[c.ID] == 0 {
 				posdegree.remove(c.ID)
-				zerodegree.add(c.ID)
+				zerodegree = append(zerodegree, c.ID)
+			} else {
+				succs = append(succs, c.ID)
 			}
 		}
@@ -132,30 +134,30 @@ blockloop:
 		// Use degree for now.
 		bid = 0

-		mindegree := f.NumBlocks()
-		for _, e := range order[len(order)-1].Succs {
-			c := e.b
-			if scheduled[c.ID] || c.Kind == BlockExit {
-				continue
-			}
-			if indegree[c.ID] < mindegree {
-				mindegree = indegree[c.ID]
-				bid = c.ID
-			}
-		}
-		if bid != 0 {
-			continue
-		}
 		// TODO: improve this part
 		// No successor of the previously scheduled block works.
 		// Pick a zero-degree block if we can.
- for zerodegree.size() > 0 { - cid := zerodegree.pop() + for len(zerodegree) > 0 { + // Pop an element from the tail of the queue. + cid := zerodegree[len(zerodegree)-1] + zerodegree = zerodegree[:len(zerodegree)-1] if !scheduled[cid] { bid = cid continue blockloop } } + + // Still nothing, pick the unscheduled successor block encountered most recently. + for len(succs) > 0 { + // Pop an element from the tail of the queue. + cid := succs[len(succs)-1] + succs = succs[:len(succs)-1] + if !scheduled[cid] { + bid = cid + continue blockloop + } + } + // Still nothing, pick any non-exit block. for posdegree.size() > 0 { cid := posdegree.pop() diff --git a/src/cmd/compile/internal/ssa/lca.go b/src/cmd/compile/internal/ssa/lca.go index 5cb73911dfa..90daebe44f7 100644 --- a/src/cmd/compile/internal/ssa/lca.go +++ b/src/cmd/compile/internal/ssa/lca.go @@ -4,6 +4,10 @@ package ssa +import ( + "math/bits" +) + // Code to compute lowest common ancestors in the dominator tree. // https://en.wikipedia.org/wiki/Lowest_common_ancestor // https://en.wikipedia.org/wiki/Range_minimum_query#Solution_using_constant_time_and_linearithmic_space @@ -79,7 +83,7 @@ func makeLCArange(f *Func) *lcaRange { } // Compute fast range-minimum query data structure - var rangeMin [][]ID + rangeMin := make([][]ID, 0, bits.Len64(uint64(len(tour)))) rangeMin = append(rangeMin, tour) // 1-size windows are just the tour itself. for logS, s := 1, 2; s < len(tour); logS, s = logS+1, s*2 { r := make([]ID, len(tour)-s+1) diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go index 49898a1322b..f462bf29a64 100644 --- a/src/cmd/compile/internal/ssa/likelyadjust.go +++ b/src/cmd/compile/internal/ssa/likelyadjust.go @@ -222,6 +222,7 @@ func likelyadjust(f *Func) { if opcodeTable[v.Op].call { local[b.ID] = blCALL certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID]) + break } } } diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go index 4cd0ac8d777..252c47cdebc 100644 --- a/src/cmd/compile/internal/ssa/location.go +++ b/src/cmd/compile/internal/ssa/location.go @@ -88,28 +88,22 @@ func (t LocPair) String() string { return fmt.Sprintf("<%s,%s>", n0, n1) } -type ArgPair struct { - reg *Register - mem LocalSlot -} +type LocResults []Location -func (ap *ArgPair) Reg() int16 { - return ap.reg.objNum -} - -func (ap *ArgPair) Type() *types.Type { - return ap.mem.Type -} - -func (ap *ArgPair) Mem() *LocalSlot { - return &ap.mem -} - -func (t ArgPair) String() string { - n0 := "nil" - if t.reg != nil { - n0 = t.reg.String() +func (t LocResults) String() string { + s := "<" + a := "" + for _, r := range t { + a += s + s = "," + a += r.String() } - n1 := t.mem.String() - return fmt.Sprintf("<%s,%s>", n0, n1) + a += ">" + return a +} + +type Spill struct { + Type *types.Type + Offset int64 + Reg int16 } diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go index 9c73bcff262..738c62607ad 100644 --- a/src/cmd/compile/internal/ssa/loopreschedchecks.go +++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go @@ -246,7 +246,8 @@ func insertLoopReschedChecks(f *Func) { // mem1 := call resched (mem0) // goto header resched := f.fe.Syslook("goschedguarded") - mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, StaticAuxCall(resched, nil, nil), mem0) + // TODO(register args) -- will need more details + mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, StaticAuxCall(resched, nil), 
mem0)
 	sched.AddEdgeTo(h)
 	headerMemPhi.AddArg(mem1)
diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go
index 2e5e421df7f..35010a78d8e 100644
--- a/src/cmd/compile/internal/ssa/looprotate.go
+++ b/src/cmd/compile/internal/ssa/looprotate.go
@@ -68,12 +68,15 @@ func loopRotate(f *Func) {
 			if nextb == p { // original loop predecessor is next
 				break
 			}
-			if loopnest.b2l[nextb.ID] != loop { // about to leave loop
-				break
+			if loopnest.b2l[nextb.ID] == loop {
+				after[p.ID] = append(after[p.ID], nextb)
 			}
-			after[p.ID] = append(after[p.ID], nextb)
 			b = nextb
 		}
+		// Swap b and p so that we'll handle p before b when moving blocks.
+		f.Blocks[idToIdx[loop.header.ID]] = p
+		f.Blocks[idToIdx[p.ID]] = loop.header
+		idToIdx[loop.header.ID], idToIdx[p.ID] = idToIdx[p.ID], idToIdx[loop.header.ID]

 		// Place b after p.
 		for _, b := range after[p.ID] {
@@ -86,21 +89,23 @@
 	// before the rest of the loop. And that relies on the
 	// fact that we only identify reducible loops.
 	j := 0
-	for i, b := range f.Blocks {
+	// Some blocks that are not part of a loop may be placed
+	// between loop blocks. To avoid overwriting these blocks,
+	// use a temporary slice.
+	newOrder := make([]*Block, 0, f.NumBlocks())
+	for _, b := range f.Blocks {
 		if _, ok := move[b.ID]; ok {
 			continue
 		}
-		f.Blocks[j] = b
+		newOrder = append(newOrder, b)
 		j++
 		for _, a := range after[b.ID] {
-			if j > i {
-				f.Fatalf("head before tail in loop %s", b)
-			}
-			f.Blocks[j] = a
+			newOrder = append(newOrder, a)
 			j++
 		}
 	}
 	if j != len(f.Blocks) {
 		f.Fatalf("bad reordering in looprotate")
 	}
+	f.Blocks = newOrder
 }
diff --git a/src/cmd/compile/internal/ssa/lower.go b/src/cmd/compile/internal/ssa/lower.go
index f332b2e028e..5760c356015 100644
--- a/src/cmd/compile/internal/ssa/lower.go
+++ b/src/cmd/compile/internal/ssa/lower.go
@@ -21,8 +21,12 @@ func checkLower(f *Func) {
 			continue // lowered
 		}
 		switch v.Op {
-		case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpConvert, OpInlMark:
+		case OpSP, OpSB, OpInitMem, OpArg, OpArgIntReg, OpArgFloatReg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpSelectN, OpConvert, OpInlMark:
 			continue // ok not to lower
+		case OpMakeResult:
+			if b.Controls[0] == v {
+				continue
+			}
 		case OpGetG:
 			if f.Config.hasGReg {
 				// has hardware g register, regalloc takes care of it
@@ -30,6 +34,7 @@
 			}
 		}
 		s := "not lowered: " + v.String() + ", " + v.Op.String() + " " + v.Type.SimpleString()
+
 		for _, a := range v.Args {
 			s += " " + a.Type.SimpleString()
 		}
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index bae50657c9a..14f511a5f1c 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -6,8 +6,8 @@ package ssa

 import (
 	"cmd/compile/internal/ir"
-	"cmd/internal/objabi"
 	"cmd/internal/src"
+	"internal/buildcfg"
 )

 // nilcheckelim eliminates unnecessary nil checks.
@@ -192,7 +192,7 @@ func nilcheckelim(f *Func) {
 const minZeroPage = 4096

 // faultOnLoad is true if a load to an address below minZeroPage will trigger a SIGSEGV.
-var faultOnLoad = objabi.GOOS != "aix"
+var faultOnLoad = buildcfg.GOOS != "aix"

 // nilcheckelim2 eliminates unnecessary nil checks.
 // Runs after lowering and scheduling.
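The looprotate change above switches from rewriting f.Blocks in place to collecting the new order in a temporary slice, so blocks that sit between loop blocks are not overwritten. A minimal standalone sketch of that reordering, with ints standing in for *ssa.Block:

	package main

	import "fmt"

	// reorder sketches the looprotate fix: instead of writing the new block
	// order into the slice being iterated, collect it into a fresh slice.
	// Blocks marked in move are skipped and re-emitted via after[...].
	func reorder(blocks []int, move map[int]bool, after map[int][]int) []int {
		newOrder := make([]int, 0, len(blocks))
		for _, b := range blocks {
			if move[b] {
				continue // re-emitted below via after[...]
			}
			newOrder = append(newOrder, b)
			newOrder = append(newOrder, after[b]...)
		}
		return newOrder
	}

	func main() {
		blocks := []int{10, 20, 30, 40}
		move := map[int]bool{30: true}   // block 30 is being moved
		after := map[int][]int{40: {30}} // ...to just after block 40
		fmt.Println(reorder(blocks, move, after)) // [10 20 40 30]
	}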
diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go index 2a9c8e4f326..9d6aeca9c0d 100644 --- a/src/cmd/compile/internal/ssa/numberlines.go +++ b/src/cmd/compile/internal/ssa/numberlines.go @@ -16,7 +16,8 @@ func isPoorStatementOp(op Op) bool { // so that a debugger-user sees the stop before the panic, and can examine the value. case OpAddr, OpLocalAddr, OpOffPtr, OpStructSelect, OpPhi, OpITab, OpIData, OpIMake, OpStringMake, OpSliceMake, OpStructMake0, OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4, - OpConstBool, OpConst8, OpConst16, OpConst32, OpConst64, OpConst32F, OpConst64F: + OpConstBool, OpConst8, OpConst16, OpConst32, OpConst64, OpConst32F, OpConst64F, OpSB, OpSP, + OpArgIntReg, OpArgFloatReg: return true } return false @@ -61,7 +62,7 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int { // statement boundary. func notStmtBoundary(op Op) bool { switch op { - case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg: + case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg, OpArgIntReg, OpArgFloatReg: return true } return false diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index f41d014d413..f09a08abcf7 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -5,10 +5,12 @@ package ssa import ( + "cmd/compile/internal/abi" "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/obj" "fmt" + "strings" ) // An Op encodes the specific operation that a Value performs. @@ -67,51 +69,187 @@ type regInfo struct { outputs []outputInfo } +func (r *regInfo) String() string { + s := "" + s += "INS:\n" + for _, i := range r.inputs { + mask := fmt.Sprintf("%64b", i.regs) + mask = strings.Replace(mask, "0", ".", -1) + s += fmt.Sprintf("%2d |%s|\n", i.idx, mask) + } + s += "OUTS:\n" + for _, i := range r.outputs { + mask := fmt.Sprintf("%64b", i.regs) + mask = strings.Replace(mask, "0", ".", -1) + s += fmt.Sprintf("%2d |%s|\n", i.idx, mask) + } + s += "CLOBBERS:\n" + mask := fmt.Sprintf("%64b", r.clobbers) + mask = strings.Replace(mask, "0", ".", -1) + s += fmt.Sprintf(" |%s|\n", mask) + return s +} + type auxType int8 -type Param struct { - Type *types.Type - Offset int32 // Offset of Param if not in a register. - Name *ir.Name // For OwnAux, need to prepend stores with Vardefs +type AuxNameOffset struct { + Name *ir.Name + Offset int64 +} + +func (a *AuxNameOffset) CanBeAnSSAAux() {} +func (a *AuxNameOffset) String() string { + return fmt.Sprintf("%s+%d", a.Name.Sym().Name, a.Offset) } type AuxCall struct { Fn *obj.LSym - args []Param // Includes receiver for method calls. Does NOT include hidden closure pointer. - results []Param + reg *regInfo // regInfo for this call + abiInfo *abi.ABIParamResultInfo } -// ResultForOffset returns the index of the result at a particular offset among the results -// This does not include the mem result for the call opcode. -func (a *AuxCall) ResultForOffset(offset int64) int64 { - which := int64(-1) - for i := int64(0); i < a.NResults(); i++ { // note aux NResults does not include mem result. - if a.OffsetOfResult(i) == offset { - which = i - break +// Reg returns the regInfo for a given call, combining the derived in/out register masks +// with the machine-specific register information in the input i. (The machine-specific +// regInfo is much handier at the call site than it is when the AuxCall is being constructed, +// therefore do this lazily). 
+// +// TODO: there is a Clever Hack that allows pre-generation of a small-ish number of the slices +// of inputInfo and outputInfo used here, provided that we are willing to reorder the inputs +// and outputs from calls, so that all integer registers come first, then all floating registers. +// At this point (active development of register ABI) that is very premature, +// but if this turns out to be a cost, we could do it. +func (a *AuxCall) Reg(i *regInfo, c *Config) *regInfo { + if a.reg.clobbers != 0 { + // Already updated + return a.reg + } + if a.abiInfo.InRegistersUsed()+a.abiInfo.OutRegistersUsed() == 0 { + // Shortcut for zero case, also handles old ABI. + a.reg = i + return a.reg + } + + k := len(i.inputs) + for _, p := range a.abiInfo.InParams() { + for _, r := range p.Registers { + m := archRegForAbiReg(r, c) + a.reg.inputs = append(a.reg.inputs, inputInfo{idx: k, regs: (1 << m)}) + k++ } } - return which + a.reg.inputs = append(a.reg.inputs, i.inputs...) // These are less constrained, thus should come last + k = len(i.outputs) + for _, p := range a.abiInfo.OutParams() { + for _, r := range p.Registers { + m := archRegForAbiReg(r, c) + a.reg.outputs = append(a.reg.outputs, outputInfo{idx: k, regs: (1 << m)}) + k++ + } + } + a.reg.outputs = append(a.reg.outputs, i.outputs...) + a.reg.clobbers = i.clobbers + return a.reg +} +func (a *AuxCall) ABI() *abi.ABIConfig { + return a.abiInfo.Config() +} +func (a *AuxCall) ABIInfo() *abi.ABIParamResultInfo { + return a.abiInfo +} +func (a *AuxCall) ResultReg(c *Config) *regInfo { + if a.abiInfo.OutRegistersUsed() == 0 { + return a.reg + } + if len(a.reg.inputs) > 0 { + return a.reg + } + k := 0 + for _, p := range a.abiInfo.OutParams() { + for _, r := range p.Registers { + m := archRegForAbiReg(r, c) + a.reg.inputs = append(a.reg.inputs, inputInfo{idx: k, regs: (1 << m)}) + k++ + } + } + return a.reg +} + +// For ABI register index r, returns the (dense) register number used in +// SSA backend. +func archRegForAbiReg(r abi.RegIndex, c *Config) uint8 { + var m int8 + if int(r) < len(c.intParamRegs) { + m = c.intParamRegs[r] + } else { + m = c.floatParamRegs[int(r)-len(c.intParamRegs)] + } + return uint8(m) +} + +// For ABI register index r, returns the register number used in the obj +// package (assembler). +func ObjRegForAbiReg(r abi.RegIndex, c *Config) int16 { + m := archRegForAbiReg(r, c) + return c.registers[m].objNum +} + +// ArgWidth returns the amount of stack needed for all the inputs +// and outputs of a function or method, including ABI-defined parameter +// slots and ABI-defined spill slots for register-resident parameters. +// +// The name is taken from the types package's ArgWidth(), +// which predated changes to the ABI; this version handles those changes. +func (a *AuxCall) ArgWidth() int64 { + return a.abiInfo.ArgWidth() +} + +// ParamAssignmentForResult returns the ABI Parameter assignment for result which (indexed 0, 1, etc). +func (a *AuxCall) ParamAssignmentForResult(which int64) *abi.ABIParamAssignment { + return a.abiInfo.OutParam(int(which)) } // OffsetOfResult returns the SP offset of result which (indexed 0, 1, etc). func (a *AuxCall) OffsetOfResult(which int64) int64 { - return int64(a.results[which].Offset) + n := int64(a.abiInfo.OutParam(int(which)).Offset()) + return n } // OffsetOfArg returns the SP offset of argument which (indexed 0, 1, etc). 
+// If the call is to a method, the receiver is the first argument (i.e., index 0).
 func (a *AuxCall) OffsetOfArg(which int64) int64 {
-	return int64(a.args[which].Offset)
+	n := int64(a.abiInfo.InParam(int(which)).Offset())
+	return n
+}
+
+// RegsOfResult returns the register(s) used for result which (indexed 0, 1, etc).
+func (a *AuxCall) RegsOfResult(which int64) []abi.RegIndex {
+	return a.abiInfo.OutParam(int(which)).Registers
+}
+
+// RegsOfArg returns the register(s) used for argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0).
+func (a *AuxCall) RegsOfArg(which int64) []abi.RegIndex {
+	return a.abiInfo.InParam(int(which)).Registers
+}
+
+// NameOfResult returns the name of result which (indexed 0, 1, etc).
+func (a *AuxCall) NameOfResult(which int64) *ir.Name {
+	name := a.abiInfo.OutParam(int(which)).Name
+	if name == nil {
+		return nil
+	}
+	return name.(*ir.Name)
+}

 // TypeOfResult returns the type of result which (indexed 0, 1, etc).
 func (a *AuxCall) TypeOfResult(which int64) *types.Type {
-	return a.results[which].Type
+	return a.abiInfo.OutParam(int(which)).Type
 }

 // TypeOfArg returns the type of argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0).
 func (a *AuxCall) TypeOfArg(which int64) *types.Type {
-	return a.args[which].Type
+	return a.abiInfo.InParam(int(which)).Type
 }

 // SizeOfResult returns the size of result which (indexed 0, 1, etc).
@@ -120,13 +258,14 @@ func (a *AuxCall) SizeOfResult(which int64) int64 {
 }

 // SizeOfArg returns the size of argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0).
 func (a *AuxCall) SizeOfArg(which int64) int64 {
 	return a.TypeOfArg(which).Width
 }

 // NResults returns the number of results
 func (a *AuxCall) NResults() int64 {
-	return int64(len(a.results))
+	return int64(len(a.abiInfo.OutParams()))
 }

 // LateExpansionResultType returns the result type (including trailing mem)
@@ -140,15 +279,12 @@ func (a *AuxCall) LateExpansionResultType() *types.Type {
 	return types.NewResults(tys)
 }

-// NArgs returns the number of arguments
+// NArgs returns the number of arguments (including receiver, if there is one).
 func (a *AuxCall) NArgs() int64 {
-	return int64(len(a.args))
+	return int64(len(a.abiInfo.InParams()))
 }

-// String returns
-// "AuxCall{<fn>(<args>)}" if len(results) == 0;
-// "AuxCall{<fn>(<args>)<results[0]>}" if len(results) == 1;
-// "AuxCall{<fn>(<args>)(<results>)}" otherwise.
+// String returns "AuxCall{<fn>}"
 func (a *AuxCall) String() string {
 	var fn string
 	if a.Fn == nil {
@@ -156,78 +292,75 @@
 	} else {
 		fn = fmt.Sprintf("AuxCall{%v", a.Fn)
 	}
-
-	if len(a.args) == 0 {
-		fn += "()"
-	} else {
-		s := "("
-		for _, arg := range a.args {
-			fn += fmt.Sprintf("%s[%v,%v]", s, arg.Type, arg.Offset)
-			s = ","
-		}
-		fn += ")"
-	}
-
-	if len(a.results) > 0 { // usual is zero or one; only some RT calls have more than one.
-		if len(a.results) == 1 {
-			fn += fmt.Sprintf("[%v,%v]", a.results[0].Type, a.results[0].Offset)
-		} else {
-			s := "("
-			for _, result := range a.results {
-				fn += fmt.Sprintf("%s[%v,%v]", s, result.Type, result.Offset)
-				s = ","
-			}
-			fn += ")"
-		}
-	}
+	// TODO how much of the ABI should be printed?

 	return fn + "}"
 }

 // StaticAuxCall returns an AuxCall for a static call.
-func StaticAuxCall(sym *obj.LSym, args []Param, results []Param) *AuxCall { - return &AuxCall{Fn: sym, args: args, results: results} +func StaticAuxCall(sym *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall { + if paramResultInfo == nil { + panic(fmt.Errorf("Nil paramResultInfo, sym=%v", sym)) + } + var reg *regInfo + if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 { + reg = ®Info{} + } + return &AuxCall{Fn: sym, abiInfo: paramResultInfo, reg: reg} } // InterfaceAuxCall returns an AuxCall for an interface call. -func InterfaceAuxCall(args []Param, results []Param) *AuxCall { - return &AuxCall{Fn: nil, args: args, results: results} +func InterfaceAuxCall(paramResultInfo *abi.ABIParamResultInfo) *AuxCall { + var reg *regInfo + if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 { + reg = ®Info{} + } + return &AuxCall{Fn: nil, abiInfo: paramResultInfo, reg: reg} } // ClosureAuxCall returns an AuxCall for a closure call. -func ClosureAuxCall(args []Param, results []Param) *AuxCall { - return &AuxCall{Fn: nil, args: args, results: results} +func ClosureAuxCall(paramResultInfo *abi.ABIParamResultInfo) *AuxCall { + var reg *regInfo + if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 { + reg = ®Info{} + } + return &AuxCall{Fn: nil, abiInfo: paramResultInfo, reg: reg} } func (*AuxCall) CanBeAnSSAAux() {} // OwnAuxCall returns a function's own AuxCall -func OwnAuxCall(fn *obj.LSym, args []Param, results []Param) *AuxCall { +func OwnAuxCall(fn *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall { // TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate. - return &AuxCall{Fn: fn, args: args, results: results} + var reg *regInfo + if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 { + reg = ®Info{} + } + return &AuxCall{Fn: fn, abiInfo: paramResultInfo, reg: reg} } const ( - auxNone auxType = iota - auxBool // auxInt is 0/1 for false/true - auxInt8 // auxInt is an 8-bit integer - auxInt16 // auxInt is a 16-bit integer - auxInt32 // auxInt is a 32-bit integer - auxInt64 // auxInt is a 64-bit integer - auxInt128 // auxInt represents a 128-bit integer. Always 0. - auxUInt8 // auxInt is an 8-bit unsigned integer - auxFloat32 // auxInt is a float32 (encoded with math.Float64bits) - auxFloat64 // auxInt is a float64 (encoded with math.Float64bits) - auxFlagConstant // auxInt is a flagConstant - auxString // aux is a string - auxSym // aux is a symbol (a *gc.Node for locals, an *obj.LSym for globals, or nil for none) - auxSymOff // aux is a symbol, auxInt is an offset - auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff - auxTyp // aux is a type - auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).Size() == AuxInt - auxCCop // aux is a ssa.Op that represents a flags-to-bool conversion (e.g. LessThan) - auxCall // aux is a *ssa.AuxCall - auxCallOff // aux is a *ssa.AuxCall, AuxInt is int64 param (in+out) size + auxNone auxType = iota + auxBool // auxInt is 0/1 for false/true + auxInt8 // auxInt is an 8-bit integer + auxInt16 // auxInt is a 16-bit integer + auxInt32 // auxInt is a 32-bit integer + auxInt64 // auxInt is a 64-bit integer + auxInt128 // auxInt represents a 128-bit integer. Always 0. 
+ auxUInt8 // auxInt is an 8-bit unsigned integer + auxFloat32 // auxInt is a float32 (encoded with math.Float64bits) + auxFloat64 // auxInt is a float64 (encoded with math.Float64bits) + auxFlagConstant // auxInt is a flagConstant + auxNameOffsetInt8 // aux is a &struct{Name ir.Name, Offset int64}; auxInt is index in parameter registers array + auxString // aux is a string + auxSym // aux is a symbol (a *gc.Node for locals, an *obj.LSym for globals, or nil for none) + auxSymOff // aux is a symbol, auxInt is an offset + auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff + auxTyp // aux is a type + auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).Size() == AuxInt + auxCCop // aux is a ssa.Op that represents a flags-to-bool conversion (e.g. LessThan) + auxCall // aux is a *ssa.AuxCall + auxCallOff // aux is a *ssa.AuxCall, AuxInt is int64 param (in+out) size // architecture specific aux types auxARM64BitField // aux is an arm64 bitfield lsb and width packed into auxInt @@ -269,13 +402,13 @@ type Sym interface { // The low 32 bits hold a pointer offset. type ValAndOff int64 -func (x ValAndOff) Val() int64 { return int64(x) >> 32 } -func (x ValAndOff) Val32() int32 { return int32(int64(x) >> 32) } +func (x ValAndOff) Val() int32 { return int32(int64(x) >> 32) } +func (x ValAndOff) Val64() int64 { return int64(x) >> 32 } func (x ValAndOff) Val16() int16 { return int16(int64(x) >> 32) } func (x ValAndOff) Val8() int8 { return int8(int64(x) >> 32) } -func (x ValAndOff) Off() int64 { return int64(int32(x)) } -func (x ValAndOff) Off32() int32 { return int32(x) } +func (x ValAndOff) Off64() int64 { return int64(int32(x)) } +func (x ValAndOff) Off() int32 { return int32(x) } func (x ValAndOff) String() string { return fmt.Sprintf("val=%d,off=%d", x.Val(), x.Off()) @@ -287,40 +420,16 @@ func validVal(val int64) bool { return val == int64(int32(val)) } -// validOff reports whether the offset can be used -// as an argument to makeValAndOff. -func validOff(off int64) bool { - return off == int64(int32(off)) -} - -// validValAndOff reports whether we can fit the value and offset into -// a ValAndOff value. -func validValAndOff(val, off int64) bool { - if !validVal(val) { - return false - } - if !validOff(off) { - return false - } - return true -} - -func makeValAndOff32(val, off int32) ValAndOff { +func makeValAndOff(val, off int32) ValAndOff { return ValAndOff(int64(val)<<32 + int64(uint32(off))) } -func makeValAndOff64(val, off int64) ValAndOff { - if !validValAndOff(val, off) { - panic("invalid makeValAndOff64") - } - return ValAndOff(val<<32 + int64(uint32(off))) -} func (x ValAndOff) canAdd32(off int32) bool { - newoff := x.Off() + int64(off) + newoff := x.Off64() + int64(off) return newoff == int64(int32(newoff)) } func (x ValAndOff) canAdd64(off int64) bool { - newoff := x.Off() + off + newoff := x.Off64() + off return newoff == int64(int32(newoff)) } @@ -328,13 +437,13 @@ func (x ValAndOff) addOffset32(off int32) ValAndOff { if !x.canAdd32(off) { panic("invalid ValAndOff.addOffset32") } - return makeValAndOff64(x.Val(), x.Off()+int64(off)) + return makeValAndOff(x.Val(), x.Off()+off) } func (x ValAndOff) addOffset64(off int64) ValAndOff { if !x.canAdd64(off) { panic("invalid ValAndOff.addOffset64") } - return makeValAndOff64(x.Val(), x.Off()+off) + return makeValAndOff(x.Val(), x.Off()+int32(off)) } // int128 is a type that stores a 128-bit constant. @@ -360,6 +469,7 @@ const ( BoundsSlice3BU // ... 
with unsigned high BoundsSlice3C // 3-arg slicing operation, 0 <= low <= high failed BoundsSlice3CU // ... with unsigned low + BoundsConvert // conversion to array pointer failed BoundsKindCount ) @@ -387,7 +497,8 @@ func boundsABI(b int64) int { case BoundsSlice3Alen, BoundsSlice3AlenU, BoundsSlice3Acap, - BoundsSlice3AcapU: + BoundsSlice3AcapU, + BoundsConvert: return 0 case BoundsSliceAlen, BoundsSliceAlenU, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ccfed93475d..1c37fbe0db4 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -432,6 +432,7 @@ const ( Op386BSRW Op386BSWAPL Op386SQRTSD + Op386SQRTSS Op386SBBLcarrymask Op386SETEQ Op386SETNE @@ -691,18 +692,6 @@ const ( OpAMD64BTRQconst OpAMD64BTSLconst OpAMD64BTSQconst - OpAMD64BTCQmodify - OpAMD64BTCLmodify - OpAMD64BTSQmodify - OpAMD64BTSLmodify - OpAMD64BTRQmodify - OpAMD64BTRLmodify - OpAMD64BTCQconstmodify - OpAMD64BTCLconstmodify - OpAMD64BTSQconstmodify - OpAMD64BTSLconstmodify - OpAMD64BTRQconstmodify - OpAMD64BTRLconstmodify OpAMD64TESTQ OpAMD64TESTL OpAMD64TESTW @@ -731,6 +720,8 @@ const ( OpAMD64SARLconst OpAMD64SARWconst OpAMD64SARBconst + OpAMD64SHRDQ + OpAMD64SHLDQ OpAMD64ROLQ OpAMD64ROLL OpAMD64ROLW @@ -888,6 +879,7 @@ const ( OpAMD64POPCNTQ OpAMD64POPCNTL OpAMD64SQRTSD + OpAMD64SQRTSS OpAMD64ROUNDSD OpAMD64VFMADD231SD OpAMD64SBBQcarrymask @@ -1090,6 +1082,7 @@ const ( OpARMNEGF OpARMNEGD OpARMSQRTD + OpARMSQRTF OpARMABSD OpARMCLZ OpARMREV @@ -1358,8 +1351,10 @@ const ( OpARM64FNEGS OpARM64FNEGD OpARM64FSQRTD + OpARM64FSQRTS OpARM64REV OpARM64REVW + OpARM64REV16 OpARM64REV16W OpARM64RBIT OpARM64RBITW @@ -1481,6 +1476,8 @@ const ( OpARM64MOVWloadidx4 OpARM64MOVWUloadidx4 OpARM64MOVDloadidx8 + OpARM64FMOVSloadidx4 + OpARM64FMOVDloadidx8 OpARM64MOVBstore OpARM64MOVHstore OpARM64MOVWstore @@ -1497,6 +1494,8 @@ const ( OpARM64MOVHstoreidx2 OpARM64MOVWstoreidx4 OpARM64MOVDstoreidx8 + OpARM64FMOVSstoreidx4 + OpARM64FMOVDstoreidx8 OpARM64MOVBstorezero OpARM64MOVHstorezero OpARM64MOVWstorezero @@ -1546,6 +1545,10 @@ const ( OpARM64FRINTZD OpARM64CSEL OpARM64CSEL0 + OpARM64CSINC + OpARM64CSINV + OpARM64CSNEG + OpARM64CSETM OpARM64CALLstatic OpARM64CALLclosure OpARM64CALLinter @@ -1637,6 +1640,7 @@ const ( OpMIPSNEGF OpMIPSNEGD OpMIPSSQRTD + OpMIPSSQRTF OpMIPSSLL OpMIPSSLLconst OpMIPSSRL @@ -1747,6 +1751,7 @@ const ( OpMIPS64NEGF OpMIPS64NEGD OpMIPS64SQRTD + OpMIPS64SQRTF OpMIPS64SLLV OpMIPS64SLLVconst OpMIPS64SRLV @@ -2073,9 +2078,6 @@ const ( OpRISCV64REMW OpRISCV64REMUW OpRISCV64MOVaddr - OpRISCV64MOVBconst - OpRISCV64MOVHconst - OpRISCV64MOVWconst OpRISCV64MOVDconst OpRISCV64MOVBload OpRISCV64MOVHload @@ -2139,6 +2141,8 @@ const ( OpRISCV64LoweredAtomicAdd64 OpRISCV64LoweredAtomicCas32 OpRISCV64LoweredAtomicCas64 + OpRISCV64LoweredAtomicAnd32 + OpRISCV64LoweredAtomicOr32 OpRISCV64LoweredNilCheck OpRISCV64LoweredGetClosurePtr OpRISCV64LoweredGetCallerSP @@ -2297,6 +2301,7 @@ const ( OpS390XNOT OpS390XNOTW OpS390XFSQRT + OpS390XFSQRTS OpS390XLOCGR OpS390XMOVBreg OpS390XMOVBZreg @@ -2723,6 +2728,7 @@ const ( OpRotateLeft32 OpRotateLeft64 OpSqrt + OpSqrt32 OpFloor OpCeil OpTrunc @@ -2747,6 +2753,8 @@ const ( OpConstSlice OpInitMem OpArg + OpArgIntReg + OpArgFloatReg OpAddr OpLocalAddr OpSP @@ -2814,6 +2822,7 @@ const ( OpSlicePtr OpSliceLen OpSliceCap + OpSlicePtrUnchecked OpComplexMake OpComplexReal OpComplexImag @@ -2902,6 +2911,7 @@ const ( OpAtomicOr8Variant OpAtomicOr32Variant OpClobber + OpClobberReg ) var opcodeTable = 
[...]opInfo{ @@ -2912,7 +2922,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, commutative: true, resultInArg0: true, - usesScratch: true, asm: x86.AADDSS, reg: regInfo{ inputs: []inputInfo{ @@ -2944,7 +2953,6 @@ var opcodeTable = [...]opInfo{ name: "SUBSS", argLen: 2, resultInArg0: true, - usesScratch: true, asm: x86.ASUBSS, reg: regInfo{ inputs: []inputInfo{ @@ -2976,7 +2984,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, commutative: true, resultInArg0: true, - usesScratch: true, asm: x86.AMULSS, reg: regInfo{ inputs: []inputInfo{ @@ -3008,7 +3015,6 @@ var opcodeTable = [...]opInfo{ name: "DIVSS", argLen: 2, resultInArg0: true, - usesScratch: true, asm: x86.ADIVSS, reg: regInfo{ inputs: []inputInfo{ @@ -4072,10 +4078,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "UCOMISS", - argLen: 2, - usesScratch: true, - asm: x86.AUCOMISS, + name: "UCOMISS", + argLen: 2, + asm: x86.AUCOMISS, reg: regInfo{ inputs: []inputInfo{ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 @@ -4084,10 +4089,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "UCOMISD", - argLen: 2, - usesScratch: true, - asm: x86.AUCOMISD, + name: "UCOMISD", + argLen: 2, + asm: x86.AUCOMISD, reg: regInfo{ inputs: []inputInfo{ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 @@ -4778,6 +4782,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SQRTSS", + argLen: 1, + asm: x86.ASQRTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, { name: "SBBLcarrymask", argLen: 1, @@ -5027,10 +5044,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CVTTSD2SL", - argLen: 1, - usesScratch: true, - asm: x86.ACVTTSD2SL, + name: "CVTTSD2SL", + argLen: 1, + asm: x86.ACVTTSD2SL, reg: regInfo{ inputs: []inputInfo{ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 @@ -5041,10 +5057,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CVTTSS2SL", - argLen: 1, - usesScratch: true, - asm: x86.ACVTTSS2SL, + name: "CVTTSS2SL", + argLen: 1, + asm: x86.ACVTTSS2SL, reg: regInfo{ inputs: []inputInfo{ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 @@ -5055,10 +5070,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CVTSL2SS", - argLen: 1, - usesScratch: true, - asm: x86.ACVTSL2SS, + name: "CVTSL2SS", + argLen: 1, + asm: x86.ACVTSL2SS, reg: regInfo{ inputs: []inputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -5069,10 +5083,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CVTSL2SD", - argLen: 1, - usesScratch: true, - asm: x86.ACVTSL2SD, + name: "CVTSL2SD", + argLen: 1, + asm: x86.ACVTSL2SD, reg: regInfo{ inputs: []inputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -5083,10 +5096,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CVTSD2SS", - argLen: 1, - usesScratch: true, - asm: x86.ACVTSD2SS, + name: "CVTSD2SS", + argLen: 1, + asm: x86.ACVTSD2SS, reg: regInfo{ inputs: []inputInfo{ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 @@ -8497,180 +8509,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "BTCQmodify", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTCQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTCLmodify", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTCL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTSQmodify", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTSQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTSLmodify", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTSL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTRQmodify", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTRQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTRLmodify", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTRL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTCQconstmodify", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTCQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTCLconstmodify", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTCL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTSQconstmodify", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTSLconstmodify", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTSL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTRQconstmodify", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTRQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, - { - name: "BTRLconstmodify", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymRead | SymWrite, - asm: x86.ABTRL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB - }, - }, - }, { name: "TESTQ", argLen: 2, @@ -9083,6 +8921,40 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SHRDQ", + argLen: 3, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRQ, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLDQ", + argLen: 3, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "ROLQ", argLen: 2, @@ -11635,6 +11507,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SQRTSS", + argLen: 1, + asm: x86.ASQRTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "ROUNDSD", auxType: auxInt8, @@ -13198,7 +13083,7 @@ var opcodeTable = [...]opInfo{ { name: "CALLstatic", auxType: auxCallOff, - argLen: 1, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ @@ -13208,7 +13093,7 @@ var opcodeTable = [...]opInfo{ { name: "CALLclosure", auxType: auxCallOff, - argLen: 3, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ @@ -13222,7 +13107,7 @@ var opcodeTable = [...]opInfo{ { name: "CALLinter", auxType: auxCallOff, - argLen: 2, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ @@ -13808,7 +13693,7 @@ var opcodeTable = [...]opInfo{ {0, 2}, // R1 {1, 1}, // R0 }, - clobbers: 16396, // R2 R3 R14 + clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ {0, 1}, // R0 {1, 2}, // R1 @@ -14429,6 +14314,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SQRTF", + argLen: 1, + asm: arm.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, { name: "ABSD", argLen: 1, @@ -17144,7 +17042,7 @@ var opcodeTable = [...]opInfo{ {0, 2}, // R1 {1, 1}, // R0 }, - clobbers: 16386, // R1 R14 + clobbers: 20482, // R1 R12 R14 }, }, { @@ -17158,7 +17056,7 @@ var opcodeTable = [...]opInfo{ {0, 4}, // R2 {1, 2}, // R1 }, - clobbers: 16391, // R0 R1 R2 R14 + clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { @@ -17319,7 +17217,7 @@ var opcodeTable = [...]opInfo{ {0, 4}, // R2 {1, 8}, // R3 }, - clobbers: 4294918144, // R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, @@ -18091,6 +17989,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FSQRTS", + argLen: 1, + asm: arm64.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "REV", argLen: 1, @@ -18117,6 +18028,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "REV16", + argLen: 1, + asm: arm64.AREV16, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 
R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, { name: "REV16W", argLen: 1, @@ -19796,6 +19720,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FMOVSloadidx4", + argLen: 3, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDloadidx8", + argLen: 3, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "MOVBstore", auxType: auxSymOff, @@ -20003,6 +19955,30 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FMOVSstoreidx4", + argLen: 4, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstoreidx8", + argLen: 4, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "MOVBstorezero", auxType: auxSymOff, @@ -20629,6 +20605,62 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "CSINC", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CSINV", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 
670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CSNEG", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSNEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CSETM", + auxType: auxCCop, + argLen: 1, + asm: arm64.ACSETM, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, { name: "CALLstatic", auxType: auxCallOff, @@ -20849,7 +20881,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {0, 1048576}, // R20 }, - clobbers: 537919488, // R20 R30 + clobbers: 538116096, // R16 R17 R20 R30 }, }, { @@ -20877,7 +20909,7 @@ var opcodeTable = [...]opInfo{ {0, 2097152}, // R21 {1, 1048576}, // R20 }, - clobbers: 607125504, // R20 R21 R26 R30 + clobbers: 607322112, // R16 R17 R20 R21 R26 R30 }, }, { @@ -21374,7 +21406,7 @@ var opcodeTable = [...]opInfo{ {0, 4}, // R2 {1, 8}, // R3 }, - clobbers: 9223372035244163072, // R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 9223372035244359680, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { @@ -21832,6 +21864,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SQRTF", + argLen: 1, + asm: mips.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, { name: "SLL", argLen: 2, @@ -23311,6 +23356,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SQRTF", + argLen: 1, + asm: mips.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "SLLV", argLen: 2, @@ -27673,42 +27731,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "MOVBconst", - auxType: auxInt8, - argLen: 0, - rematerializeable: true, - asm: riscv.AMOV, - reg: regInfo{ - outputs: []outputInfo{ - {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - }, - }, - { - name: "MOVHconst", - auxType: auxInt16, - argLen: 0, - rematerializeable: true, - asm: riscv.AMOV, - reg: regInfo{ - outputs: []outputInfo{ - {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - rematerializeable: true, - asm: riscv.AMOV, - reg: regInfo{ - outputs: []outputInfo{ - {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 
X30 - }, - }, - }, { name: "MOVDconst", auxType: auxInt64, @@ -28589,6 +28611,32 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOORW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + }, + }, { name: "LoweredNilCheck", argLen: 2, @@ -30895,6 +30943,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FSQRTS", + argLen: 1, + asm: s390x.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, { name: "LOCGR", auxType: auxS390XCCMask, @@ -33829,10 +33890,10 @@ var opcodeTable = [...]opInfo{ asm: wasm.AF32Sqrt, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -33842,10 +33903,10 @@ var opcodeTable = [...]opInfo{ asm: wasm.AF32Trunc, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -33855,10 +33916,10 @@ var opcodeTable = [...]opInfo{ asm: wasm.AF32Ceil, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -33868,10 +33929,10 @@ var opcodeTable = [...]opInfo{ asm: wasm.AF32Floor, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -33881,10 +33942,10 @@ var opcodeTable = [...]opInfo{ asm: wasm.AF32Nearest, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -33894,10 +33955,10 @@ var opcodeTable = [...]opInfo{ asm: wasm.AF32Abs, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -33907,11 +33968,11 @@ var opcodeTable = [...]opInfo{ asm: wasm.AF32Copysign, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -35129,6 +35190,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Sqrt32", + argLen: 1, + generic: true, + }, { name: "Floor", argLen: 1, @@ -35264,6 +35330,20 @@ var opcodeTable = [...]opInfo{ symEffect: SymRead, generic: true, }, + { + name: "ArgIntReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "ArgFloatReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, { name: "Addr", auxType: auxSym, @@ -35367,21 +35447,21 @@ var opcodeTable = [...]opInfo{ { name: "ClosureCall", auxType: auxCallOff, - argLen: 3, + argLen: -1, call: true, generic: true, }, { name: "StaticCall", auxType: auxCallOff, - argLen: 1, + argLen: -1, call: true, generic: true, }, { name: "InterCall", auxType: auxCallOff, - argLen: 2, + argLen: -1, call: true, generic: true, }, @@ -35633,6 +35713,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SlicePtrUnchecked", + argLen: 1, + generic: true, + }, { name: "ComplexMake", argLen: 2, @@ -36123,16 +36208,21 @@ var opcodeTable = [...]opInfo{ symEffect: SymNone, generic: true, }, + { + name: "ClobberReg", + argLen: 0, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } func (o Op) Scale() int16 { return int16(opcodeTable[o].scale) } func (o Op) String() string { return opcodeTable[o].name } -func (o Op) UsesScratch() bool { return opcodeTable[o].usesScratch } func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect } func (o Op) IsCall() bool { return opcodeTable[o].call } func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects } func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint } +func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 } var registers386 = [...]Register{ {0, x86.REG_AX, 0, "AX"}, @@ -36153,6 +36243,8 @@ var registers386 = [...]Register{ {15, x86.REG_X7, -1, "X7"}, {16, 0, -1, "SB"}, } +var paramIntReg386 = []int8(nil) +var paramFloatReg386 = []int8(nil) var gpRegMask386 = regMask(239) var fpRegMask386 = regMask(65280) var specialRegMask386 = 
regMask(0) @@ -36193,6 +36285,8 @@ var registersAMD64 = [...]Register{ {31, x86.REG_X15, -1, "X15"}, {32, 0, -1, "SB"}, } +var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11} +var paramFloatRegAMD64 = []int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30} var gpRegMaskAMD64 = regMask(49135) var fpRegMaskAMD64 = regMask(2147418112) var specialRegMaskAMD64 = regMask(2147483648) @@ -36233,6 +36327,8 @@ var registersARM = [...]Register{ {31, arm.REG_F15, -1, "F15"}, {32, 0, -1, "SB"}, } +var paramIntRegARM = []int8(nil) +var paramFloatRegARM = []int8(nil) var gpRegMaskARM = regMask(21503) var fpRegMaskARM = regMask(4294901760) var specialRegMaskARM = regMask(0) @@ -36304,6 +36400,8 @@ var registersARM64 = [...]Register{ {62, arm64.REG_F31, -1, "F31"}, {63, 0, -1, "SB"}, } +var paramIntRegARM64 = []int8(nil) +var paramFloatRegARM64 = []int8(nil) var gpRegMaskARM64 = regMask(670826495) var fpRegMaskARM64 = regMask(9223372034707292160) var specialRegMaskARM64 = regMask(0) @@ -36359,6 +36457,8 @@ var registersMIPS = [...]Register{ {46, mips.REG_LO, -1, "LO"}, {47, 0, -1, "SB"}, } +var paramIntRegMIPS = []int8(nil) +var paramFloatRegMIPS = []int8(nil) var gpRegMaskMIPS = regMask(335544318) var fpRegMaskMIPS = regMask(35183835217920) var specialRegMaskMIPS = regMask(105553116266496) @@ -36429,6 +36529,8 @@ var registersMIPS64 = [...]Register{ {61, mips.REG_LO, -1, "LO"}, {62, 0, -1, "SB"}, } +var paramIntRegMIPS64 = []int8(nil) +var paramFloatRegMIPS64 = []int8(nil) var gpRegMaskMIPS64 = regMask(167772158) var fpRegMaskMIPS64 = regMask(1152921504338411520) var specialRegMaskMIPS64 = regMask(3458764513820540928) @@ -36500,6 +36602,8 @@ var registersPPC64 = [...]Register{ {62, ppc64.REG_F30, -1, "F30"}, {63, ppc64.REG_F31, -1, "F31"}, } +var paramIntRegPPC64 = []int8(nil) +var paramFloatRegPPC64 = []int8(nil) var gpRegMaskPPC64 = regMask(1073733624) var fpRegMaskPPC64 = regMask(576460743713488896) var specialRegMaskPPC64 = regMask(0) @@ -36571,6 +36675,8 @@ var registersRISCV64 = [...]Register{ {62, riscv.REG_F31, -1, "F31"}, {63, 0, -1, "SB"}, } +var paramIntRegRISCV64 = []int8(nil) +var paramFloatRegRISCV64 = []int8(nil) var gpRegMaskRISCV64 = regMask(1006632948) var fpRegMaskRISCV64 = regMask(9223372034707292160) var specialRegMaskRISCV64 = regMask(0) @@ -36611,6 +36717,8 @@ var registersS390X = [...]Register{ {31, s390x.REG_F15, -1, "F15"}, {32, 0, -1, "SB"}, } +var paramIntRegS390X = []int8(nil) +var paramFloatRegS390X = []int8(nil) var gpRegMaskS390X = regMask(23551) var fpRegMaskS390X = regMask(4294901760) var specialRegMaskS390X = regMask(0) @@ -36669,6 +36777,8 @@ var registersWasm = [...]Register{ {49, wasm.REGG, -1, "g"}, {50, 0, -1, "SB"}, } +var paramIntRegWasm = []int8(nil) +var paramFloatRegWasm = []int8(nil) var gpRegMaskWasm = regMask(65535) var fpRegMaskWasm = regMask(281474976645120) var fp32RegMaskWasm = regMask(4294901760) diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go index db7b02275cf..745c61cb860 100644 --- a/src/cmd/compile/internal/ssa/phiopt.go +++ b/src/cmd/compile/internal/ssa/phiopt.go @@ -46,7 +46,6 @@ func phiopt(f *Func) { continue } // b0 is the if block giving the boolean value. - // reverse is the predecessor from which the truth value comes. var reverse int if b0.Succs[0].b == pb0 && b0.Succs[1].b == pb1 { @@ -120,6 +119,141 @@ func phiopt(f *Func) { } } } + // strengthen phi optimization. + // Main use case is to transform: + // x := false + // if c { + // x = true + // ... 
+ // } + // into + // x := c + // if x { ... } + // + // For example, in SSA code a case appears as + // b0 + // If c -> b, sb0 + // sb0 + // If d -> sd0, sd1 + // sd1 + // ... + // sd0 + // Plain -> b + // b + // x = (OpPhi (ConstBool [true]) (ConstBool [false])) + // + // In this case we can also replace x with a copy of c. + // + // The optimization idea: + // 1. block b has a phi value x, x = OpPhi (ConstBool [true]) (ConstBool [false]), + // and len(b.Preds) is equal to 2. + // 2. find the common dominator(b0) of the predecessors(pb0, pb1) of block b, and the + // dominator(b0) is an If block. + // Special case: one of the predecessors(pb0 or pb1) is the dominator(b0). + // 3. the successors(sb0, sb1) of the dominator need to dominate the predecessors(pb0, pb1) + // of block b respectively. + // 4. replace this boolean Phi based on the dominator block. + // + // b0(pb0) b0(pb1) b0 + // | \ / | / \ + // | sb1 sb0 | sb0 sb1 + // | ... ... | ... ... + // | pb1 pb0 | pb0 pb1 + // | / \ | \ / + // b b b + // + var lca *lcaRange + for _, b := range f.Blocks { + if len(b.Preds) != 2 || len(b.Values) == 0 { + // TODO: handle more than 2 predecessors, e.g. a || b || c. + continue + } + + for _, v := range b.Values { + // find a phi value v = OpPhi (ConstBool [true]) (ConstBool [false]). + // TODO: v = OpPhi (ConstBool [true]) (Arg {value}) + if v.Op != OpPhi { + continue + } + if v.Args[0].Op != OpConstBool || v.Args[1].Op != OpConstBool { + continue + } + if v.Args[0].AuxInt == v.Args[1].AuxInt { + continue + } + + pb0 := b.Preds[0].b + pb1 := b.Preds[1].b + if pb0.Kind == BlockIf && pb0 == sdom.Parent(b) { + // special case: pb0 is the dominator block b0. + // b0(pb0) + // | \ + // | sb1 + // | ... + // | pb1 + // | / + // b + // if another successor sb1 of b0(pb0) dominates pb1, do replace. + ei := b.Preds[0].i + sb1 := pb0.Succs[1-ei].b + if sdom.IsAncestorEq(sb1, pb1) { + convertPhi(pb0, v, ei) + break + } + } else if pb1.Kind == BlockIf && pb1 == sdom.Parent(b) { + // special case: pb1 is the dominator block b0. + // b0(pb1) + // / | + // sb0 | + // ... | + // pb0 | + // \ | + // b + // if another successor sb0 of b0(pb1) dominates pb0, do replace. + ei := b.Preds[1].i + sb0 := pb1.Succs[1-ei].b + if sdom.IsAncestorEq(sb0, pb0) { + convertPhi(pb1, v, 1-ei) + break + } + } else { + // b0 + // / \ + // sb0 sb1 + // ... ... + // pb0 pb1 + // \ / + // b + // + // Build data structure for fast least-common-ancestor queries. + if lca == nil { + lca = makeLCArange(f) + } + b0 := lca.find(pb0, pb1) + if b0.Kind != BlockIf { + break + } + sb0 := b0.Succs[0].b + sb1 := b0.Succs[1].b + var reverse int + if sdom.IsAncestorEq(sb0, pb0) && sdom.IsAncestorEq(sb1, pb1) { + reverse = 0 + } else if sdom.IsAncestorEq(sb1, pb0) && sdom.IsAncestorEq(sb0, pb1) { + reverse = 1 + } else { + break + } + if len(sb0.Preds) != 1 || len(sb1.Preds) != 1 { + // we cannot replace phi value x in the following case. + // if gp == nil || sp < lo { x = true } + // if a || b { x = true } + // so the if statement can only have one condition. + break + } + convertPhi(b0, v, reverse) + } + } + } } func phioptint(v *Value, b0 *Block, reverse int) { @@ -174,3 +308,16 @@ func phioptint(v *Value, b0 *Block, reverse int) { f.Warnl(v.Block.Pos, "converted OpPhi bool -> int%d", v.Type.Size()*8) } } + +// b is the If block giving the boolean value. +// v is the phi value v = (OpPhi (ConstBool [true]) (ConstBool [false])). +// reverse is the predecessor from which the truth value comes.
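To make the comment block above concrete: the strengthened pass now handles merges whose predecessors are not direct successors of the controlling If block. A minimal Go sketch of such a shape (illustrative only, not part of the patch):

```go
package main

// flag produces x = OpPhi (ConstBool [true]) (ConstBool [false]) at the merge
// after the outer if. The inner if inserts extra control flow between the test
// of c and the merge, so one predecessor of the merge is the If block itself
// (the special case handled above) while the other lies deeper in its true arm.
func flag(c, d bool) bool {
	x := false
	if c {
		if d {
			println("keep the inner branch alive")
		}
		x = true
	}
	return x // after phiopt: effectively a copy of c
}

func main() {
	println(flag(true, false))
}
```

With the pass's debug output enabled (for example -gcflags='-d=ssa/phiopt/debug=1'), the rewrite should be reported by the Warnl call in convertPhi as "converted OpPhi to Copy".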
+func convertPhi(b *Block, v *Value, reverse int) { + f := b.Func + ops := [2]Op{OpNot, OpCopy} + v.reset(ops[v.Args[reverse].AuxInt]) + v.AddArg(b.Controls[0]) + if f.pass.debug > 0 { + f.Warnl(b.Pos, "converted OpPhi to %v", v.Op) + } +} diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go index 1e04b48ba46..d2719eb8a1d 100644 --- a/src/cmd/compile/internal/ssa/poset.go +++ b/src/cmd/compile/internal/ssa/poset.go @@ -12,7 +12,7 @@ import ( // If true, check poset integrity after every mutation var debugPoset = false -const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64 +const uintSize = 32 << (^uint(0) >> 63) // 32 or 64 // bitset is a bit array for dense indexes. type bitset []uint diff --git a/src/cmd/compile/internal/ssa/print.go b/src/cmd/compile/internal/ssa/print.go index 36f09c3ad97..d917183c70f 100644 --- a/src/cmd/compile/internal/ssa/print.go +++ b/src/cmd/compile/internal/ssa/print.go @@ -154,6 +154,6 @@ func fprintFunc(p funcPrinter, f *Func) { p.endBlock(b) } for _, name := range f.Names { - p.named(name, f.NamedValues[name]) + p.named(*name, f.NamedValues[*name]) } } diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 8a2e7c09bc5..b203584c6b4 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -726,6 +726,20 @@ var ( } ) +// cleanup returns the posets to the free list +func (ft *factsTable) cleanup(f *Func) { + for _, po := range []*poset{ft.orderS, ft.orderU} { + // Make sure it's empty as it should be. A non-empty poset + // might cause errors and miscompilations if reused. + if checkEnabled { + if err := po.CheckEmpty(); err != nil { + f.Fatalf("poset not empty after function %s: %v", f.Name, err) + } + } + f.retPoset(po) + } +} + // prove removes redundant BlockIf branches that can be inferred // from previous dominating comparisons. // @@ -778,7 +792,14 @@ func prove(f *Func) { if ft.lens == nil { ft.lens = map[ID]*Value{} } - ft.lens[v.Args[0].ID] = v + // Set all len Values for the same slice as equal in the poset. + // The poset handles transitive relations, so Values related to + // any OpSliceLen for this slice will be correctly related to others. + if l, ok := ft.lens[v.Args[0].ID]; ok { + ft.update(b, v, l, signed, eq) + } else { + ft.lens[v.Args[0].ID] = v + } ft.update(b, v, ft.zero, signed, gt|eq) if v.Args[0].Op == OpSliceMake { if lensVars == nil { @@ -790,7 +811,12 @@ func prove(f *Func) { if ft.caps == nil { ft.caps = map[ID]*Value{} } - ft.caps[v.Args[0].ID] = v + // Same as case OpSliceLen above, but for slice cap. + if c, ok := ft.caps[v.Args[0].ID]; ok { + ft.update(b, v, c, signed, eq) + } else { + ft.caps[v.Args[0].ID] = v + } ft.update(b, v, ft.zero, signed, gt|eq) if v.Args[0].Op == OpSliceMake { if lensVars == nil { @@ -905,17 +931,7 @@ func prove(f *Func) { ft.restore() - // Return the posets to the free list - for _, po := range []*poset{ft.orderS, ft.orderU} { - // Make sure it's empty as it should be. A non-empty poset - // might cause errors and miscompilations if reused. 
- if checkEnabled { - if err := po.CheckEmpty(); err != nil { - f.Fatalf("prove poset not empty after function %s: %v", f.Name, err) - } - } - f.retPoset(po) - } + ft.cleanup(f) } // getBranch returns the range restrictions added by p diff --git a/src/cmd/compile/internal/ssa/redblack32.go b/src/cmd/compile/internal/ssa/redblack32.go deleted file mode 100644 index fc9cc71ba03..00000000000 --- a/src/cmd/compile/internal/ssa/redblack32.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -import "fmt" - -const ( - rankLeaf rbrank = 1 - rankZero rbrank = 0 -) - -type rbrank int8 - -// RBTint32 is a red-black tree with data stored at internal nodes, -// following Tarjan, Data Structures and Network Algorithms, -// pp 48-52, using explicit rank instead of red and black. -// Deletion is not yet implemented because it is not yet needed. -// Extra operations glb, lub, glbEq, lubEq are provided for -// use in sparse lookup algorithms. -type RBTint32 struct { - root *node32 - // An extra-clever implementation will have special cases - // for small sets, but we are not extra-clever today. -} - -func (t *RBTint32) String() string { - if t.root == nil { - return "[]" - } - return "[" + t.root.String() + "]" -} - -func (t *node32) String() string { - s := "" - if t.left != nil { - s = t.left.String() + " " - } - s = s + fmt.Sprintf("k=%d,d=%v", t.key, t.data) - if t.right != nil { - s = s + " " + t.right.String() - } - return s -} - -type node32 struct { - // Standard conventions hold for left = smaller, right = larger - left, right, parent *node32 - data interface{} - key int32 - rank rbrank // From Tarjan pp 48-49: - // If x is a node with a parent, then x.rank <= x.parent.rank <= x.rank+1. - // If x is a node with a grandparent, then x.rank < x.parent.parent.rank. - // If x is an "external [null] node", then x.rank = 0 && x.parent.rank = 1. - // Any node with one or more null children should have rank = 1. -} - -// makeNode returns a new leaf node with the given key and nil data. -func (t *RBTint32) makeNode(key int32) *node32 { - return &node32{key: key, rank: rankLeaf} -} - -// IsEmpty reports whether t is empty. -func (t *RBTint32) IsEmpty() bool { - return t.root == nil -} - -// IsSingle reports whether t is a singleton (leaf). -func (t *RBTint32) IsSingle() bool { - return t.root != nil && t.root.isLeaf() -} - -// VisitInOrder applies f to the key and data pairs in t, -// with keys ordered from smallest to largest. -func (t *RBTint32) VisitInOrder(f func(int32, interface{})) { - if t.root == nil { - return - } - t.root.visitInOrder(f) -} - -func (n *node32) Data() interface{} { - if n == nil { - return nil - } - return n.data -} - -func (n *node32) keyAndData() (k int32, d interface{}) { - if n == nil { - k = 0 - d = nil - } else { - k = n.key - d = n.data - } - return -} - -func (n *node32) Rank() rbrank { - if n == nil { - return 0 - } - return n.rank -} - -// Find returns the data associated with key in the tree, or -// nil if key is not in the tree. -func (t *RBTint32) Find(key int32) interface{} { - return t.root.find(key).Data() -} - -// Insert adds key to the tree and associates key with data. -// If key was already in the tree, it updates the associated data. -// Insert returns the previous data associated with key, -// or nil if key was not present. -// Insert panics if data is nil. 
-func (t *RBTint32) Insert(key int32, data interface{}) interface{} { - if data == nil { - panic("Cannot insert nil data into tree") - } - n := t.root - var newroot *node32 - if n == nil { - n = t.makeNode(key) - newroot = n - } else { - newroot, n = n.insert(key, t) - } - r := n.data - n.data = data - t.root = newroot - return r -} - -// Min returns the minimum element of t and its associated data. -// If t is empty, then (0, nil) is returned. -func (t *RBTint32) Min() (k int32, d interface{}) { - return t.root.min().keyAndData() -} - -// Max returns the maximum element of t and its associated data. -// If t is empty, then (0, nil) is returned. -func (t *RBTint32) Max() (k int32, d interface{}) { - return t.root.max().keyAndData() -} - -// Glb returns the greatest-lower-bound-exclusive of x and its associated -// data. If x has no glb in the tree, then (0, nil) is returned. -func (t *RBTint32) Glb(x int32) (k int32, d interface{}) { - return t.root.glb(x, false).keyAndData() -} - -// GlbEq returns the greatest-lower-bound-inclusive of x and its associated -// data. If x has no glbEQ in the tree, then (0, nil) is returned. -func (t *RBTint32) GlbEq(x int32) (k int32, d interface{}) { - return t.root.glb(x, true).keyAndData() -} - -// Lub returns the least-upper-bound-exclusive of x and its associated -// data. If x has no lub in the tree, then (0, nil) is returned. -func (t *RBTint32) Lub(x int32) (k int32, d interface{}) { - return t.root.lub(x, false).keyAndData() -} - -// LubEq returns the least-upper-bound-inclusive of x and its associated -// data. If x has no lubEq in the tree, then (0, nil) is returned. -func (t *RBTint32) LubEq(x int32) (k int32, d interface{}) { - return t.root.lub(x, true).keyAndData() -} - -func (t *node32) isLeaf() bool { - return t.left == nil && t.right == nil -} - -func (t *node32) visitInOrder(f func(int32, interface{})) { - if t.left != nil { - t.left.visitInOrder(f) - } - f(t.key, t.data) - if t.right != nil { - t.right.visitInOrder(f) - } -} - -func (t *node32) maxChildRank() rbrank { - if t.left == nil { - if t.right == nil { - return rankZero - } - return t.right.rank - } - if t.right == nil { - return t.left.rank - } - if t.right.rank > t.left.rank { - return t.right.rank - } - return t.left.rank -} - -func (t *node32) minChildRank() rbrank { - if t.left == nil || t.right == nil { - return rankZero - } - if t.right.rank < t.left.rank { - return t.right.rank - } - return t.left.rank -} - -func (t *node32) find(key int32) *node32 { - for t != nil { - if key < t.key { - t = t.left - } else if key > t.key { - t = t.right - } else { - return t - } - } - return nil -} - -func (t *node32) min() *node32 { - if t == nil { - return t - } - for t.left != nil { - t = t.left - } - return t -} - -func (t *node32) max() *node32 { - if t == nil { - return t - } - for t.right != nil { - t = t.right - } - return t -} - -func (t *node32) glb(key int32, allow_eq bool) *node32 { - var best *node32 - for t != nil { - if key <= t.key { - if key == t.key && allow_eq { - return t - } - // t is too big, glb is to left. - t = t.left - } else { - // t is a lower bound, record it and seek a better one. - best = t - t = t.right - } - } - return best -} - -func (t *node32) lub(key int32, allow_eq bool) *node32 { - var best *node32 - for t != nil { - if key >= t.key { - if key == t.key && allow_eq { - return t - } - // t is too small, lub is to right. - t = t.right - } else { - // t is a upper bound, record it and seek a better one. 
- best = t - t = t.left - } - } - return best -} - -func (t *node32) insert(x int32, w *RBTint32) (newroot, newnode *node32) { - // defaults - newroot = t - newnode = t - if x == t.key { - return - } - if x < t.key { - if t.left == nil { - n := w.makeNode(x) - n.parent = t - t.left = n - newnode = n - return - } - var new_l *node32 - new_l, newnode = t.left.insert(x, w) - t.left = new_l - new_l.parent = t - newrank := 1 + new_l.maxChildRank() - if newrank > t.rank { - if newrank > 1+t.right.Rank() { // rotations required - if new_l.left.Rank() < new_l.right.Rank() { - // double rotation - t.left = new_l.rightToRoot() - } - newroot = t.leftToRoot() - return - } else { - t.rank = newrank - } - } - } else { // x > t.key - if t.right == nil { - n := w.makeNode(x) - n.parent = t - t.right = n - newnode = n - return - } - var new_r *node32 - new_r, newnode = t.right.insert(x, w) - t.right = new_r - new_r.parent = t - newrank := 1 + new_r.maxChildRank() - if newrank > t.rank { - if newrank > 1+t.left.Rank() { // rotations required - if new_r.right.Rank() < new_r.left.Rank() { - // double rotation - t.right = new_r.leftToRoot() - } - newroot = t.rightToRoot() - return - } else { - t.rank = newrank - } - } - } - return -} - -func (t *node32) rightToRoot() *node32 { - // this - // left right - // rl rr - // - // becomes - // - // right - // this rr - // left rl - // - right := t.right - rl := right.left - right.parent = t.parent - right.left = t - t.parent = right - // parent's child ptr fixed in caller - t.right = rl - if rl != nil { - rl.parent = t - } - return right -} - -func (t *node32) leftToRoot() *node32 { - // this - // left right - // ll lr - // - // becomes - // - // left - // ll this - // lr right - // - left := t.left - lr := left.right - left.parent = t.parent - left.right = t - t.parent = left - // parent's child ptr fixed in caller - t.left = lr - if lr != nil { - lr.parent = t - } - return left -} - -// next returns the successor of t in a left-to-right -// walk of the tree in which t is embedded. -func (t *node32) next() *node32 { - // If there is a right child, it is to the right - r := t.right - if r != nil { - return r.min() - } - // if t is p.left, then p, else repeat. - p := t.parent - for p != nil { - if p.left == t { - return p - } - t = p - p = t.parent - } - return nil -} - -// prev returns the predecessor of t in a left-to-right -// walk of the tree in which t is embedded. -func (t *node32) prev() *node32 { - // If there is a left child, it is to the left - l := t.left - if l != nil { - return l.max() - } - // if t is p.right, then p, else repeat. - p := t.parent - for p != nil { - if p.right == t { - return p - } - t = p - p = t.parent - } - return nil -} diff --git a/src/cmd/compile/internal/ssa/redblack32_test.go b/src/cmd/compile/internal/ssa/redblack32_test.go deleted file mode 100644 index 376e8cff8dd..00000000000 --- a/src/cmd/compile/internal/ssa/redblack32_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -import ( - "fmt" - "testing" -) - -type sstring string - -func (s sstring) String() string { - return string(s) -} - -// wellFormed ensures that a red-black tree meets -// all of its invariants and returns a string identifying -// the first problem encountered. If there is no problem -// then the returned string is empty. 
The size is also -// returned to allow comparison of calculated tree size -// with expected. -func (t *RBTint32) wellFormed() (s string, i int) { - if t.root == nil { - s = "" - i = 0 - return - } - return t.root.wellFormedSubtree(nil, -0x80000000, 0x7fffffff) -} - -// wellFormedSubtree ensures that a red-black subtree meets -// all of its invariants and returns a string identifying -// the first problem encountered. If there is no problem -// then the returned string is empty. The size is also -// returned to allow comparison of calculated tree size -// with expected. -func (t *node32) wellFormedSubtree(parent *node32, min, max int32) (s string, i int) { - i = -1 // initialize to a failing value - s = "" // s is the reason for failure; empty means okay. - - if t.parent != parent { - s = "t.parent != parent" - return - } - - if min >= t.key { - s = "min >= t.key" - return - } - - if max <= t.key { - s = "max <= t.key" - return - } - - l := t.left - r := t.right - if l == nil && r == nil { - if t.rank != rankLeaf { - s = "leaf rank wrong" - return - } - } - if l != nil { - if t.rank < l.rank { - s = "t.rank < l.rank" - } else if t.rank > 1+l.rank { - s = "t.rank > 1+l.rank" - } else if t.rank <= l.maxChildRank() { - s = "t.rank <= l.maxChildRank()" - } else if t.key <= l.key { - s = "t.key <= l.key" - } - if s != "" { - return - } - } else { - if t.rank != 1 { - s = "t w/ left nil has rank != 1" - return - } - } - if r != nil { - if t.rank < r.rank { - s = "t.rank < r.rank" - } else if t.rank > 1+r.rank { - s = "t.rank > 1+r.rank" - } else if t.rank <= r.maxChildRank() { - s = "t.rank <= r.maxChildRank()" - } else if t.key >= r.key { - s = "t.key >= r.key" - } - if s != "" { - return - } - } else { - if t.rank != 1 { - s = "t w/ right nil has rank != 1" - return - } - } - ii := 1 - if l != nil { - res, il := l.wellFormedSubtree(t, min, t.key) - if res != "" { - s = "L." + res - return - } - ii += il - } - if r != nil { - res, ir := r.wellFormedSubtree(t, t.key, max) - if res != "" { - s = "R." + res - return - } - ii += ir - } - i = ii - return -} - -func (t *RBTint32) DebugString() string { - if t.root == nil { - return "" - } - return t.root.DebugString() -} - -// DebugString prints the tree with nested information -// to allow an eyeball check on the tree balance. 
-func (t *node32) DebugString() string { - s := "" - if t.left != nil { - s += "[" - s += t.left.DebugString() - s += "]" - } - s += fmt.Sprintf("%v=%v:%d", t.key, t.data, t.rank) - if t.right != nil { - s += "[" - s += t.right.DebugString() - s += "]" - } - return s -} - -func allRBT32Ops(te *testing.T, x []int32) { - t := &RBTint32{} - for i, d := range x { - x[i] = d + d // Double everything for glb/lub testing - } - - // fmt.Printf("Inserting double of %v", x) - k := 0 - min := int32(0x7fffffff) - max := int32(-0x80000000) - for _, d := range x { - if d < min { - min = d - } - - if d > max { - max = d - } - - t.Insert(d, sstring(fmt.Sprintf("%v", d))) - k++ - s, i := t.wellFormed() - if i != k { - te.Errorf("Wrong tree size %v, expected %v for %v", i, k, t.DebugString()) - } - if s != "" { - te.Errorf("Tree consistency problem at %v", s) - return - } - } - - oops := false - - for _, d := range x { - s := fmt.Sprintf("%v", d) - f := t.Find(d) - - // data - if s != fmt.Sprintf("%v", f) { - te.Errorf("s(%v) != f(%v)", s, f) - oops = true - } - } - - if !oops { - for _, d := range x { - s := fmt.Sprintf("%v", d) - - kg, g := t.Glb(d + 1) - kge, ge := t.GlbEq(d) - kl, l := t.Lub(d - 1) - kle, le := t.LubEq(d) - - // keys - if d != kg { - te.Errorf("d(%v) != kg(%v)", d, kg) - } - if d != kl { - te.Errorf("d(%v) != kl(%v)", d, kl) - } - if d != kge { - te.Errorf("d(%v) != kge(%v)", d, kge) - } - if d != kle { - te.Errorf("d(%v) != kle(%v)", d, kle) - } - // data - if s != fmt.Sprintf("%v", g) { - te.Errorf("s(%v) != g(%v)", s, g) - } - if s != fmt.Sprintf("%v", l) { - te.Errorf("s(%v) != l(%v)", s, l) - } - if s != fmt.Sprintf("%v", ge) { - te.Errorf("s(%v) != ge(%v)", s, ge) - } - if s != fmt.Sprintf("%v", le) { - te.Errorf("s(%v) != le(%v)", s, le) - } - } - - for _, d := range x { - s := fmt.Sprintf("%v", d) - kge, ge := t.GlbEq(d + 1) - kle, le := t.LubEq(d - 1) - if d != kge { - te.Errorf("d(%v) != kge(%v)", d, kge) - } - if d != kle { - te.Errorf("d(%v) != kle(%v)", d, kle) - } - if s != fmt.Sprintf("%v", ge) { - te.Errorf("s(%v) != ge(%v)", s, ge) - } - if s != fmt.Sprintf("%v", le) { - te.Errorf("s(%v) != le(%v)", s, le) - } - } - - kg, g := t.Glb(min) - kge, ge := t.GlbEq(min - 1) - kl, l := t.Lub(max) - kle, le := t.LubEq(max + 1) - fmin := t.Find(min - 1) - fmax := t.Find(min + 11) - - if kg != 0 || kge != 0 || kl != 0 || kle != 0 { - te.Errorf("Got non-zero-key for missing query") - } - - if g != nil || ge != nil || l != nil || le != nil || fmin != nil || fmax != nil { - te.Errorf("Got non-error-data for missing query") - } - - } -} - -func TestAllRBTreeOps(t *testing.T) { - allRBT32Ops(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) - allRBT32Ops(t, []int32{22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 3, 2, 1, 25, 24, 23, 12, 11, 10, 9, 8, 7, 6, 5, 4}) - allRBT32Ops(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) - allRBT32Ops(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}) - allRBT32Ops(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) - allRBT32Ops(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}) -} diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 8c25b1c81dc..c81d5574fe5 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ 
b/src/cmd/compile/internal/ssa/regalloc.go @@ -114,12 +114,13 @@ package ssa import ( + "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/types" - "cmd/internal/objabi" "cmd/internal/src" "cmd/internal/sys" "fmt" + "internal/buildcfg" "math/bits" "unsafe" ) @@ -151,6 +152,14 @@ type register uint8 const noRegister register = 255 +// For bulk initializing +var noRegisters [32]register = [32]register{ + noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, + noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, + noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, + noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, +} + // A regMask encodes a set of machine registers. // TODO: regMask -> regSet? type regMask uint64 @@ -233,12 +242,6 @@ type regAllocState struct { GReg register allocatable regMask - // for each block, its primary predecessor. - // A predecessor of b is primary if it is the closest - // predecessor that appears before b in the layout order. - // We record the index in the Preds list where the primary predecessor sits. - primary []int32 - // live values at the end of each block. live[b.ID] is a list of value IDs // which are live at the end of b, together with a count of how many instructions // forward to the next use. @@ -296,6 +299,12 @@ type regAllocState struct { // choose a good order in which to visit blocks for allocation purposes. visitOrder []*Block + + // blockOrder[b.ID] corresponds to the index of block b in visitOrder. + blockOrder []int32 + + // whether to insert instructions that clobber dead registers at call sites + doClobber bool } type endReg struct { @@ -334,6 +343,17 @@ func (s *regAllocState) freeRegs(m regMask) { } } +// clobberRegs inserts instructions that clobber registers listed in m. +func (s *regAllocState) clobberRegs(m regMask) { + m &= s.allocatable & s.f.Config.gpRegMask // only integer registers can contain pointers, only clobber them + for m != 0 { + r := pickReg(m) + m &^= 1 << r + x := s.curBlock.NewValue0(src.NoXPos, OpClobberReg, types.TypeVoid) + s.f.setHome(x, &s.registers[r]) + } +} + // setOrig records that c's original value is the same as // v's original value. func (s *regAllocState) setOrig(c *Value, v *Value) { @@ -589,7 +609,7 @@ func (s *regAllocState) init(f *Func) { if s.f.Config.hasGReg { s.allocatable &^= 1 << s.GReg } - if objabi.Framepointer_enabled && s.f.Config.FPReg >= 0 { + if buildcfg.FramePointerEnabled && s.f.Config.FPReg >= 0 { s.allocatable &^= 1 << uint(s.f.Config.FPReg) } if s.f.Config.LinkReg != -1 { // Leaf functions don't save/restore the link register. s.allocatable &^= 1 << uint(s.f.Config.LinkReg) } - if s.f.Config.arch == "arm" && objabi.GOARM == 5 { - // On ARMv5 we insert softfloat calls at each FP instruction. - // This clobbers LR almost everywhere. Disable allocating LR - // on ARMv5. - s.allocatable &^= 1 << uint(s.f.Config.LinkReg) - } } if s.f.Config.ctxt.Flag_dynlink { switch s.f.Config.arch { @@ -634,9 +648,9 @@ func (s *regAllocState) init(f *Func) { // Compute block order. This array allows us to distinguish forward edges // from backward edges and compute how far they go.
- blockOrder := make([]int32, f.NumBlocks()) + s.blockOrder = make([]int32, f.NumBlocks()) for i, b := range s.visitOrder { - blockOrder[b.ID] = int32(i) + s.blockOrder[b.ID] = int32(i) } s.regs = make([]regState, s.numRegs) @@ -662,22 +676,6 @@ func (s *regAllocState) init(f *Func) { } s.computeLive() - // Compute primary predecessors. - s.primary = make([]int32, f.NumBlocks()) - for _, b := range s.visitOrder { - best := -1 - for i, e := range b.Preds { - p := e.b - if blockOrder[p.ID] >= blockOrder[b.ID] { - continue // backward edge - } - if best == -1 || blockOrder[p.ID] > blockOrder[b.Preds[best].b.ID] { - best = i - } - } - s.primary[b.ID] = int32(best) - } - s.endRegs = make([][]endReg, f.NumBlocks()) s.startRegs = make([][]startReg, f.NumBlocks()) s.spillLive = make([][]ID, f.NumBlocks()) @@ -717,6 +715,14 @@ func (s *regAllocState) init(f *Func) { } } } + + // The clobberdeadreg experiment inserts code to clobber dead registers + // at call sites. + // Ignore huge functions to avoid doing too much work. + if base.Flag.ClobberDeadReg && len(s.f.Blocks) <= 10000 { + // TODO: honor GOCLOBBERDEADHASH, or maybe GOSSAHASH. + s.doClobber = true + } } // Adds a use record for id at distance dist from the start of the block. @@ -761,6 +767,9 @@ func (s *regAllocState) advanceUses(v *Value) { // current instruction. func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool { u := s.values[v.ID].uses + if u == nil { + panic(fmt.Errorf("u is nil, v = %s, s.values[v.ID] = %v", v.LongString(), s.values[v.ID])) + } d := u.dist for u != nil && u.dist == d { u = u.next @@ -797,7 +806,8 @@ func (s *regAllocState) compatRegs(t *types.Type) regMask { } // regspec returns the regInfo for operation op. -func (s *regAllocState) regspec(op Op) regInfo { +func (s *regAllocState) regspec(v *Value) regInfo { + op := v.Op if op == OpConvert { // OpConvert is a generic op, so it doesn't have a // register set in the static table. It can use any @@ -805,6 +815,22 @@ func (s *regAllocState) regspec(op Op) regInfo { m := s.allocatable & s.f.Config.gpRegMask return regInfo{inputs: []inputInfo{{regs: m}}, outputs: []outputInfo{{regs: m}}} } + if op == OpArgIntReg { + reg := v.Block.Func.Config.intParamRegs[v.AuxInt8()] + return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}} + } + if op == OpArgFloatReg { + reg := v.Block.Func.Config.floatParamRegs[v.AuxInt8()] + return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}} + } + if op.IsCall() { + if ac, ok := v.Aux.(*AuxCall); ok && ac.reg != nil { + return *ac.Reg(&opcodeTable[op].reg, s.f.Config) + } + } + if op == OpMakeResult && s.f.OwnAux.reg != nil { + return *s.f.OwnAux.ResultReg(s.f.Config) + } return opcodeTable[op].reg } @@ -935,10 +961,49 @@ func (s *regAllocState) regalloc(f *Func) { // This is the complicated case. We have more than one predecessor, // which means we may have Phi ops. - // Start with the final register state of the primary predecessor - idx := s.primary[b.ID] + // Start with the final register state of the predecessor with the fewest spill values. + // This is based on the following points: + // 1. Fewer spill values indicate that the register pressure of this path is smaller, + // so the values of this block are more likely to be allocated to registers. + // 2. Avoid the predecessor that contains a function call, because such a predecessor + // usually generates a lot of spills and loses the previous allocation state. + // TODO: Improve this part. At least the size of endRegs of the predecessor also has
+ // an impact on the code size and compiler speed. But it is not easy to find a simple + // and efficient method that combines multiple factors. + idx := -1 + for i, p := range b.Preds { + // If the predecessor has not been visited yet, skip it because its end state + // (endRegs and spillLive) has not been computed yet. + pb := p.b + if s.blockOrder[pb.ID] >= s.blockOrder[b.ID] { + continue + } + if idx == -1 { + idx = i + continue + } + pSel := b.Preds[idx].b + if len(s.spillLive[pb.ID]) < len(s.spillLive[pSel.ID]) { + idx = i + } else if len(s.spillLive[pb.ID]) == len(s.spillLive[pSel.ID]) { + // Use a bit of likely information. After the critical pass, pb and pSel must + // be plain blocks, so check edge pb->pb.Preds instead of edge pb->b. + // TODO: improve the prediction of the likely predecessor. The following + // method is only suitable for the simplest cases. For complex cases, + // the prediction may be inaccurate, but this does not affect the + // correctness of the program. + // According to the layout algorithm, the predecessor with the + // smaller blockOrder is the true branch, and the test results show + // that it is better to choose the predecessor with a smaller + // blockOrder than to make no choice. + if pb.likelyBranch() && !pSel.likelyBranch() || s.blockOrder[pb.ID] < s.blockOrder[pSel.ID] { + idx = i + } + } + } if idx < 0 { - f.Fatalf("block with no primary predecessor %s", b) + f.Fatalf("bad visitOrder, no predecessor of %s has been visited before it", b) } p := b.Preds[idx].b s.setState(s.endRegs[p.ID]) @@ -1026,7 +1091,7 @@ func (s *regAllocState) regalloc(f *Func) { // If one of the other inputs of v is in a register, and the register is available, // select this register, which can save some unnecessary copies.
for i, pe := range b.Preds { - if int32(i) == idx { + if i == idx { continue } ri := noRegister @@ -1160,7 +1225,7 @@ func (s *regAllocState) regalloc(f *Func) { for i := len(oldSched) - 1; i >= 0; i-- { v := oldSched[i] prefs := desired.remove(v.ID) - regspec := s.regspec(v.Op) + regspec := s.regspec(v) desired.clobber(regspec.clobbers) for _, j := range regspec.inputs { if countRegs(j.regs) != 1 { @@ -1190,7 +1255,7 @@ func (s *regAllocState) regalloc(f *Func) { if s.f.pass.debug > regDebug { fmt.Printf(" processing %s\n", v.LongString()) } - regspec := s.regspec(v.Op) + regspec := s.regspec(v) if v.Op == OpPhi { f.Fatalf("phi %s not at start of block", v) } @@ -1208,13 +1273,17 @@ func (s *regAllocState) regalloc(f *Func) { s.sb = v.ID continue } - if v.Op == OpSelect0 || v.Op == OpSelect1 { + if v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN { if s.values[v.ID].needReg { - var i = 0 - if v.Op == OpSelect1 { - i = 1 + if v.Op == OpSelectN { + s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocResults)[int(v.AuxInt)].(*Register).num), v, v) + } else { + var i = 0 + if v.Op == OpSelect1 { + i = 1 + } + s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v) } - s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v) } b.Values = append(b.Values, v) s.advanceUses(v) @@ -1268,6 +1337,9 @@ func (s *regAllocState) regalloc(f *Func) { } if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 { // No register allocation required (or none specified yet) + if s.doClobber && v.Op.IsCall() { + s.clobberRegs(regspec.clobbers) + } s.freeRegs(regspec.clobbers) b.Values = append(b.Values, v) s.advanceUses(v) @@ -1305,12 +1377,58 @@ func (s *regAllocState) regalloc(f *Func) { } } - // Move arguments to registers. Process in an ordering defined - // by the register specification (most constrained first). - args = append(args[:0], v.Args...) + // Move arguments to registers. + // First, if an arg must be in a specific register and it is already + // in place, keep it. + args = append(args[:0], make([]*Value, len(v.Args))...) + for i, a := range v.Args { + if !s.values[a.ID].needReg { + args[i] = a + } + } for _, i := range regspec.inputs { mask := i.regs - if mask&s.values[args[i.idx].ID].regs == 0 { + if countRegs(mask) == 1 && mask&s.values[v.Args[i.idx].ID].regs != 0 { + args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos) + } + } + // Then, if an arg must be in a specific register and that + // register is free, allocate that one. Otherwise when processing + // another input we may kick a value into the free register, which + // then will be kicked out again. + // This is a common case for passing-in-register arguments for + // function calls. + for { + freed := false + for _, i := range regspec.inputs { + if args[i.idx] != nil { + continue // already allocated + } + mask := i.regs + if countRegs(mask) == 1 && mask&^s.used != 0 { + args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos) + // If the input is in other registers that will be clobbered by v, + // or the input is dead, free the registers. This may make room + // for other inputs. + oldregs := s.values[v.Args[i.idx].ID].regs + if oldregs&^regspec.clobbers == 0 || !s.liveAfterCurrentInstruction(v.Args[i.idx]) { + s.freeRegs(oldregs &^ mask &^ s.nospill) + freed = true + } + } + } + if !freed { + break + } + } + // Last, allocate remaining ones, in an ordering defined + // by the register specification (most constrained first). 
+ for _, i := range regspec.inputs { + if args[i.idx] != nil { + continue // already allocated + } + mask := i.regs + if mask&s.values[v.Args[i.idx].ID].regs == 0 { // Need a new register for the input. mask &= s.allocatable mask &^= s.nospill @@ -1329,7 +1447,7 @@ func (s *regAllocState) regalloc(f *Func) { mask &^= desired.avoid } } - args[i.idx] = s.allocValToReg(args[i.idx], mask, true, v.Pos) + args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos) } // If the output clobbers the input register, make sure we have @@ -1375,9 +1493,9 @@ func (s *regAllocState) regalloc(f *Func) { goto ok } - // Try to move an input to the desired output. + // Try to move an input to the desired output, if allowed. for _, r := range dinfo[idx].out { - if r != noRegister && m>>r&1 != 0 { + if r != noRegister && (m&regspec.outputs[0].regs)>>r&1 != 0 { m = regMask(1) << r args[0] = s.allocValToReg(v.Args[0], m, true, v.Pos) // Note: we update args[0] so the instruction will @@ -1429,12 +1547,18 @@ func (s *regAllocState) regalloc(f *Func) { } // Dump any registers which will be clobbered + if s.doClobber && v.Op.IsCall() { + // clobber registers that are marked as clobber in regmask, but + // don't clobber inputs. + s.clobberRegs(regspec.clobbers &^ s.tmpused &^ s.nospill) + } s.freeRegs(regspec.clobbers) s.tmpused |= regspec.clobbers // Pick registers for outputs. { - outRegs := [2]register{noRegister, noRegister} + outRegs := noRegisters // TODO if this is costly, hoist and clear incrementally below. + maxOutIdx := -1 var used regMask for _, out := range regspec.outputs { mask := out.regs & s.allocatable &^ used @@ -1445,6 +1569,9 @@ func (s *regAllocState) regalloc(f *Func) { if !opcodeTable[v.Op].commutative { // Output must use the same register as input 0. r := register(s.f.getHome(args[0].ID).(*Register).num) + if mask>>r&1 == 0 { + s.f.Fatalf("resultInArg0 value's input %v cannot be an output of %s", s.f.getHome(args[0].ID).(*Register), v.LongString()) + } mask = regMask(1) << r } else { // Output must use the same register as input 0 or 1. @@ -1480,6 +1607,9 @@ func (s *regAllocState) regalloc(f *Func) { mask &^= desired.avoid } r := s.allocReg(mask, v) + if out.idx > maxOutIdx { + maxOutIdx = out.idx + } outRegs[out.idx] = r used |= regMask(1) << r s.tmpused |= regMask(1) << r @@ -1495,6 +1625,15 @@ func (s *regAllocState) regalloc(f *Func) { } s.f.setHome(v, outLocs) // Note that subsequent SelectX instructions will do the assignReg calls. + } else if v.Type.IsResults() { + // preallocate outLocs to the right size, which is maxOutIdx+1 + outLocs := make(LocResults, maxOutIdx+1, maxOutIdx+1) + for i := 0; i <= maxOutIdx; i++ { + if r := outRegs[i]; r != noRegister { + outLocs[i] = &s.registers[r] + } + } + s.f.setHome(v, outLocs) } else { if r := outRegs[0]; r != noRegister { s.assignReg(r, v, v) @@ -1767,6 +1906,9 @@ func (s *regAllocState) placeSpills() { // put the spill of v. At the start "best" is the best place // we have found so far. // TODO: find a way to make this O(1) without arbitrary cutoffs. + if v == nil { + panic(fmt.Errorf("nil v, s.orig[%d], vi = %v, spill = %s", i, vi, spill.LongString())) + } best := v.Block bestArg := v var bestDepth int16 @@ -2437,7 +2579,7 @@ func (s *regAllocState) computeLive() { // desired registers back through phi nodes. continue } - regspec := s.regspec(v.Op) + regspec := s.regspec(v) // Cancel desired registers if they get clobbered. desired.clobber(regspec.clobbers) // Update desired registers if there are any fixed register inputs.
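(The three passes above are the crux of argument placement under the register ABI: keep pinned inputs that are already home, satisfy pinned inputs whose register is still free before anything can take that register, and only then allocate the rest. A self-contained toy model of that ordering; registers are bitmask positions, and eviction plus the fixpoint re-scan are elided. All names here are illustrative, not compiler API.)

package main

import (
	"fmt"
	"math/bits"
)

type input struct {
	want uint // acceptable registers, as a bitmask
	in   uint // registers already holding this value (0 = none)
}

// place assigns each input a register index, honoring the pass order used
// above. It assumes a free acceptable register always exists.
func place(ins []input) []int {
	res := make([]int, len(ins))
	for i := range res {
		res[i] = -1
	}
	var used uint
	take := func(i int, m uint) {
		r := bits.TrailingZeros(m)
		res[i] = r
		used |= 1 << r
	}
	// Pass 1: pinned to one register and already in place.
	for i, in := range ins {
		if bits.OnesCount(in.want) == 1 && in.want&in.in != 0 {
			take(i, in.want)
		}
	}
	// Pass 2: pinned to one register that is still free, so a later
	// allocation cannot occupy it and force an extra move.
	for i, in := range ins {
		if res[i] == -1 && bits.OnesCount(in.want) == 1 && in.want&^used != 0 {
			take(i, in.want)
		}
	}
	// Pass 3: the rest, in the order given (the real regspec pre-sorts
	// inputs most constrained first).
	for i, in := range ins {
		if res[i] == -1 {
			take(i, in.want&^used)
		}
	}
	return res
}

func main() {
	// arg0 pinned to r0 and already there; arg1 pinned to r1, still free;
	// arg2 may use r2 or r3. Prints [0 1 2].
	fmt.Println(place([]input{{1 << 0, 1 << 0}, {1 << 1, 0}, {1<<2 | 1<<3, 0}}))
}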
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index e82aa84cdfa..375c4d5a560 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -27,7 +27,7 @@ const ( removeDeadValues = true ) -// deadcode indicates that rewrite should try to remove any values that become dead. +// deadcode indicates whether rewrite should try to remove any values that become dead. func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValueChoice) { // repeat rewrites until we find no more rewrites pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block @@ -159,7 +159,7 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValu f.freeValue(v) continue } - if v.Pos.IsStmt() != src.PosNotStmt && pendingLines.get(vl) == int32(b.ID) { + if v.Pos.IsStmt() != src.PosNotStmt && !notStmtBoundary(v.Op) && pendingLines.get(vl) == int32(b.ID) { pendingLines.remove(vl) v.Pos = v.Pos.WithIsStmt() } @@ -765,7 +765,7 @@ func devirt(v *Value, aux Aux, sym Sym, offset int64) *AuxCall { return nil } va := aux.(*AuxCall) - return StaticAuxCall(lsym, va.args, va.results) + return StaticAuxCall(lsym, va.abiInfo) } // de-virtualize an InterLECall @@ -793,7 +793,8 @@ func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym { func devirtLECall(v *Value, sym *obj.LSym) *Value { v.Op = OpStaticLECall - v.Aux.(*AuxCall).Fn = sym + auxcall := v.Aux.(*AuxCall) + auxcall.Fn = sym v.RemoveArg(0) return v } @@ -858,13 +859,13 @@ func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool { if p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpSP { return true } - return p2.Op == OpArg && p1.Args[0].Op == OpSP - case OpArg: + return (p2.Op == OpArg || p2.Op == OpArgIntReg) && p1.Args[0].Op == OpSP + case OpArg, OpArgIntReg: if p2.Op == OpSP || p2.Op == OpLocalAddr { return true } case OpSP: - return p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpArg || p2.Op == OpSP + return p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpArg || p2.Op == OpArgIntReg || p2.Op == OpSP } return false } @@ -1413,7 +1414,7 @@ func isPPC64WordRotateMask(v64 int64) bool { return (v&vp == 0 || vn&vpn == 0) && v != 0 } -// Compress mask and and shift into single value of the form +// Compress mask and shift into single value of the form // me | mb<<8 | rotate<<16 | nbits<<24 where me and mb can // be used to regenerate the input mask. func encodePPC64RotateMask(rotate, mask, nbits int64) int64 { @@ -1491,7 +1492,7 @@ func mergePPC64AndSrwi(m, s int64) int64 { if !isPPC64WordRotateMask(mask) { return 0 } - return encodePPC64RotateMask(32-s, mask, 32) + return encodePPC64RotateMask((32-s)&31, mask, 32) } // Test if a shift right feeding into a CLRLSLDI can be merged into RLWINM. @@ -1611,18 +1612,18 @@ func needRaceCleanup(sym *AuxCall, v *Value) bool { if !f.Config.Race { return false } - if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncenterfp") && !isSameCall(sym, "runtime.racefuncexit") { + if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncexit") { return false } for _, b := range f.Blocks { for _, v := range b.Values { switch v.Op { - case OpStaticCall: - // Check for racefuncenter/racefuncenterfp will encounter racefuncexit and vice versa. + case OpStaticCall, OpStaticLECall: + // Check for racefuncenter will encounter racefuncexit and vice versa.
// Allow calls to panic* s := v.Aux.(*AuxCall).Fn.String() switch s { - case "runtime.racefuncenter", "runtime.racefuncenterfp", "runtime.racefuncexit", + case "runtime.racefuncenter", "runtime.racefuncexit", "runtime.panicdivide", "runtime.panicwrap", "runtime.panicshift": continue @@ -1632,15 +1633,20 @@ func needRaceCleanup(sym *AuxCall, v *Value) bool { return false case OpPanicBounds, OpPanicExtend: // Note: these are panic generators that are ok (like the static calls above). - case OpClosureCall, OpInterCall: + case OpClosureCall, OpInterCall, OpClosureLECall, OpInterLECall: // We must keep the race functions if there are any other call types. return false } } } if isSameCall(sym, "runtime.racefuncenter") { + // TODO REGISTER ABI this needs to be cleaned up. // If we're removing racefuncenter, remove its argument as well. if v.Args[0].Op != OpStore { + if v.Op == OpStaticLECall { + // there is no store, yet. + return true + } return false } mem := v.Args[0].Args[2] diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 4e7fdb9e639..1ec2d26f750 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -620,6 +620,9 @@ func rewriteValue386(v *Value) bool { case OpSqrt: v.Op = Op386SQRTSD return true + case OpSqrt32: + v.Op = Op386SQRTSS + return true case OpStaticCall: v.Op = Op386CALLstatic return true @@ -1993,8 +1996,8 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { return true } // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) - // cond: l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l) - // result: @l.Block (CMPBconstload {sym} [makeValAndOff32(int32(c),int32(off))] ptr mem) + // cond: l.Uses == 1 && clobber(l) + // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) for { c := auxIntToInt8(v.AuxInt) l := v_0 @@ -2005,13 +2008,13 @@ func rewriteValue386_Op386CMPBconst(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)) { + if !(l.Uses == 1 && clobber(l)) { break } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), int32(off))) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -2023,8 +2026,7 @@ func rewriteValue386_Op386CMPBload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) - // cond: validValAndOff(int64(int8(c)),int64(off)) - // result: (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem) + // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -2034,11 +2036,8 @@ func rewriteValue386_Op386CMPBload(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(int64(int8(c)), int64(off))) { - break - } v.reset(Op386CMPBconstload) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -2301,8 +2300,8 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { return true } // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) - // cond: l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l) - // result: @l.Block (CMPLconstload {sym} 
[makeValAndOff32(int32(c),int32(off))] ptr mem) + // cond: l.Uses == 1 && clobber(l) + // result: @l.Block (CMPLconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) for { c := auxIntToInt32(v.AuxInt) l := v_0 @@ -2313,13 +2312,13 @@ func rewriteValue386_Op386CMPLconst(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)) { + if !(l.Uses == 1 && clobber(l)) { break } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPLconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), int32(off))) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -2331,8 +2330,7 @@ func rewriteValue386_Op386CMPLload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) - // cond: validValAndOff(int64(c),int64(off)) - // result: (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem) + // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -2342,11 +2340,8 @@ func rewriteValue386_Op386CMPLload(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(int64(c), int64(off))) { - break - } v.reset(Op386CMPLconstload) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -2594,8 +2589,8 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool { return true } // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) - // cond: l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l) - // result: @l.Block (CMPWconstload {sym} [makeValAndOff32(int32(c),int32(off))] ptr mem) + // cond: l.Uses == 1 && clobber(l) + // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) for { c := auxIntToInt16(v.AuxInt) l := v_0 @@ -2606,13 +2601,13 @@ func rewriteValue386_Op386CMPWconst(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)) { + if !(l.Uses == 1 && clobber(l)) { break } b = l.Block v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), int32(off))) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -2624,8 +2619,7 @@ func rewriteValue386_Op386CMPWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) - // cond: validValAndOff(int64(int16(c)),int64(off)) - // result: (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem) + // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -2635,11 +2629,8 @@ func rewriteValue386_Op386CMPWload(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(int64(int16(c)), int64(off))) { - break - } v.reset(Op386CMPWconstload) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -3732,8 +3723,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { return true } // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(int64(off)) - 
// result: (MOVBstoreconst [makeValAndOff32(c,off)] {sym} ptr mem) + // result: (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -3743,11 +3733,8 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validOff(int64(off))) { - break - } v.reset(Op386MOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -4087,7 +4074,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - // result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem) + // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -4105,14 +4092,14 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { break } v.reset(Op386MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - // result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem) + // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) for { a := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -4130,14 +4117,14 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { break } v.reset(Op386MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem)) // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x) - // result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem) + // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -4156,14 +4143,14 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { break } v.reset(Op386MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off())) v.Aux = symToAux(s) v.AddArg2(p0, mem) return true } // match: (MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem)) // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x) - // result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem) + // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem) for { a := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -4182,7 +4169,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { break } v.reset(Op386MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, 
a.Off())) v.Aux = symToAux(s) v.AddArg2(p0, mem) return true @@ -4301,8 +4288,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { return true } // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(int64(off)) - // result: (MOVLstoreconst [makeValAndOff32(c,off)] {sym} ptr mem) + // result: (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -4312,11 +4298,8 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validOff(int64(off))) { - break - } v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -4599,8 +4582,8 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { break } // match: (MOVLstore {sym} [off] ptr y:(ADDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off)) - // result: (ADDLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ADDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -4615,18 +4598,18 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { break } mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) { + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { break } v.reset(Op386ADDLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off)) - // result: (ANDLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ANDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -4641,18 +4624,18 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { break } mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) { + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { break } v.reset(Op386ANDLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off)) - // result: (ORLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -4667,18 +4650,18 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { break } mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) { + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { break } 
v.reset(Op386ORLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off)) - // result: (XORLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (XORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -4693,11 +4676,11 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { break } mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) { + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { break } v.reset(Op386XORLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -5283,8 +5266,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { return true } // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(int64(off)) - // result: (MOVWstoreconst [makeValAndOff32(c,off)] {sym} ptr mem) + // result: (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -5294,11 +5276,8 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validOff(int64(off))) { - break - } v.reset(Op386MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -5487,7 +5466,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) - // result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem) + // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -5505,14 +5484,14 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { break } v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem) + // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) for { a := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -5530,14 +5509,14 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { break } v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem)) // cond: x.Uses == 1 
&& a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x) - // result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem) + // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -5556,14 +5535,14 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { break } v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off())) v.Aux = symToAux(s) v.AddArg2(p0, mem) return true } // match: (MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem)) // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x) - // result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem) + // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem) for { a := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -5582,7 +5561,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { break } v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off())) v.Aux = symToAux(s) v.AddArg2(p0, mem) return true @@ -11571,7 +11550,7 @@ func rewriteValue386_OpZero(v *Value) bool { return true } // match: (Zero [3] destptr mem) - // result: (MOVBstoreconst [makeValAndOff32(0,2)] destptr (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 3 { break @@ -11579,15 +11558,15 @@ func rewriteValue386_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(Op386MOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 2)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2)) v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [5] destptr mem) - // result: (MOVBstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 5 { break @@ -11595,15 +11574,15 @@ func rewriteValue386_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(Op386MOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) - // result: (MOVWstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 6 { break @@ -11611,15 +11590,15 @@ func rewriteValue386_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(Op386MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + 
v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) - // result: (MOVLstoreconst [makeValAndOff32(0,3)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 7 { break @@ -11627,9 +11606,9 @@ func rewriteValue386_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 3)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3)) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true @@ -11656,7 +11635,7 @@ func rewriteValue386_OpZero(v *Value) bool { return true } // match: (Zero [8] destptr mem) - // result: (MOVLstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 8 { break @@ -11664,15 +11643,15 @@ func rewriteValue386_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [12] destptr mem) - // result: (MOVLstoreconst [makeValAndOff32(0,8)] destptr (MOVLstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))) + // result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))) for { if auxIntToInt64(v.AuxInt) != 12 { break @@ -11680,18 +11659,18 @@ func rewriteValue386_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v1.AddArg2(destptr, mem) v0.AddArg2(destptr, v1) v.AddArg2(destptr, v0) return true } // match: (Zero [16] destptr mem) - // result: (MOVLstoreconst [makeValAndOff32(0,12)] destptr (MOVLstoreconst [makeValAndOff32(0,8)] destptr (MOVLstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)))) + // result: (MOVLstoreconst [makeValAndOff(0,12)] destptr (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))) for { if auxIntToInt64(v.AuxInt) != 16 { break @@ -11699,13 +11678,13 @@ func rewriteValue386_OpZero(v *Value) bool { destptr := v_0 mem := v_1 
v.reset(Op386MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 12)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 12)) v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) - v2.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v2.AddArg2(destptr, mem) v1.AddArg2(destptr, v2) v0.AddArg2(destptr, v1) diff --git a/src/cmd/compile/internal/ssa/rewrite386splitload.go b/src/cmd/compile/internal/ssa/rewrite386splitload.go index fff26fa77e0..670e7f4f8f6 100644 --- a/src/cmd/compile/internal/ssa/rewrite386splitload.go +++ b/src/cmd/compile/internal/ssa/rewrite386splitload.go @@ -26,7 +26,7 @@ func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (CMPBconstload {sym} [vo] ptr mem) - // result: (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()]) + // result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -35,7 +35,7 @@ func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool { v.reset(Op386CMPBconst) v.AuxInt = int8ToAuxInt(vo.Val8()) v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) v.AddArg(v0) @@ -71,16 +71,16 @@ func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (CMPLconstload {sym} [vo] ptr mem) - // result: (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()]) + // result: (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 mem := v_1 v.reset(Op386CMPLconst) - v.AuxInt = int32ToAuxInt(vo.Val32()) + v.AuxInt = int32ToAuxInt(vo.Val()) v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) v.AddArg(v0) @@ -116,7 +116,7 @@ func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (CMPWconstload {sym} [vo] ptr mem) - // result: (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()]) + // result: (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -125,7 +125,7 @@ func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool { v.reset(Op386CMPWconst) v.AuxInt = int16ToAuxInt(vo.Val16()) v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) v.AddArg(v0) @@ -156,7 +156,5 @@ func rewriteValue386splitload_Op386CMPWload(v *Value) bool { } } func rewriteBlock386splitload(b *Block) bool { - switch b.Kind { - } return false } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 03498c719c0..efb5d271451 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3,8 +3,9 @@ package ssa 
+import "internal/buildcfg" import "math" -import "cmd/compile/internal/base" +import "cmd/internal/obj" import "cmd/compile/internal/types" func rewriteValueAMD64(v *Value) bool { @@ -67,44 +68,20 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64BSFQ(v) case OpAMD64BTCLconst: return rewriteValueAMD64_OpAMD64BTCLconst(v) - case OpAMD64BTCLconstmodify: - return rewriteValueAMD64_OpAMD64BTCLconstmodify(v) - case OpAMD64BTCLmodify: - return rewriteValueAMD64_OpAMD64BTCLmodify(v) case OpAMD64BTCQconst: return rewriteValueAMD64_OpAMD64BTCQconst(v) - case OpAMD64BTCQconstmodify: - return rewriteValueAMD64_OpAMD64BTCQconstmodify(v) - case OpAMD64BTCQmodify: - return rewriteValueAMD64_OpAMD64BTCQmodify(v) case OpAMD64BTLconst: return rewriteValueAMD64_OpAMD64BTLconst(v) case OpAMD64BTQconst: return rewriteValueAMD64_OpAMD64BTQconst(v) case OpAMD64BTRLconst: return rewriteValueAMD64_OpAMD64BTRLconst(v) - case OpAMD64BTRLconstmodify: - return rewriteValueAMD64_OpAMD64BTRLconstmodify(v) - case OpAMD64BTRLmodify: - return rewriteValueAMD64_OpAMD64BTRLmodify(v) case OpAMD64BTRQconst: return rewriteValueAMD64_OpAMD64BTRQconst(v) - case OpAMD64BTRQconstmodify: - return rewriteValueAMD64_OpAMD64BTRQconstmodify(v) - case OpAMD64BTRQmodify: - return rewriteValueAMD64_OpAMD64BTRQmodify(v) case OpAMD64BTSLconst: return rewriteValueAMD64_OpAMD64BTSLconst(v) - case OpAMD64BTSLconstmodify: - return rewriteValueAMD64_OpAMD64BTSLconstmodify(v) - case OpAMD64BTSLmodify: - return rewriteValueAMD64_OpAMD64BTSLmodify(v) case OpAMD64BTSQconst: return rewriteValueAMD64_OpAMD64BTSQconst(v) - case OpAMD64BTSQconstmodify: - return rewriteValueAMD64_OpAMD64BTSQconstmodify(v) - case OpAMD64BTSQmodify: - return rewriteValueAMD64_OpAMD64BTSQmodify(v) case OpAMD64CMOVLCC: return rewriteValueAMD64_OpAMD64CMOVLCC(v) case OpAMD64CMOVLCS: @@ -1088,6 +1065,9 @@ func rewriteValueAMD64(v *Value) bool { case OpSqrt: v.Op = OpAMD64SQRTSD return true + case OpSqrt32: + v.Op = OpAMD64SQRTSS + return true case OpStaticCall: v.Op = OpAMD64CALLstatic return true @@ -3517,105 +3497,6 @@ func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) - // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2)) { - break - } - v.reset(OpAMD64BTCLconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(sym) - v.AddArg2(base, mem) - return true - } - // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) - // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTCLconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = 
symToAux(mergeSym(sym1, sym2)) - v.AddArg2(base, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (BTCLmodify [off1+off2] {sym} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { - break - } - v.reset(OpAMD64BTCLmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(base, val, mem) - return true - } - // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTCLmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(base, val, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool { v_0 := v.Args[0] // match: (BTCQconst [c] (XORQconst [d] x)) @@ -3668,105 +3549,6 @@ func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) - // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2)) { - break - } - v.reset(OpAMD64BTCQconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(sym) - v.AddArg2(base, mem) - return true - } - // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) - // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTCQconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg2(base, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (BTCQmodify [off1+off2] {sym} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - 
off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { - break - } - v.reset(OpAMD64BTCQmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(base, val, mem) - return true - } - // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTCQmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(base, val, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool { v_0 := v.Args[0] // match: (BTLconst [c] (SHRQconst [d] x)) @@ -4001,105 +3783,6 @@ func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) - // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2)) { - break - } - v.reset(OpAMD64BTRLconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(sym) - v.AddArg2(base, mem) - return true - } - // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) - // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTRLconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg2(base, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (BTRLmodify [off1+off2] {sym} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { - break - } - v.reset(OpAMD64BTRLmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(base, val, mem) - return true - } - // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 
- for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTRLmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(base, val, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool { v_0 := v.Args[0] // match: (BTRQconst [c] (BTSQconst [c] x)) @@ -4178,105 +3861,6 @@ func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) - // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2)) { - break - } - v.reset(OpAMD64BTRQconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(sym) - v.AddArg2(base, mem) - return true - } - // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) - // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTRQconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg2(base, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (BTRQmodify [off1+off2] {sym} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { - break - } - v.reset(OpAMD64BTRQmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(base, val, mem) - return true - } - // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTRQmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(base, val, mem) - return true - } - return false -} func 
rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool { v_0 := v.Args[0] // match: (BTSLconst [c] (BTRLconst [c] x)) @@ -4347,105 +3931,6 @@ func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) - // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2)) { - break - } - v.reset(OpAMD64BTSLconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(sym) - v.AddArg2(base, mem) - return true - } - // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) - // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTSLconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg2(base, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (BTSLmodify [off1+off2] {sym} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { - break - } - v.reset(OpAMD64BTSLmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(base, val, mem) - return true - } - // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTSLmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(base, val, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool { v_0 := v.Args[0] // match: (BTSQconst [c] (BTRQconst [c] x)) @@ -4524,105 +4009,6 @@ func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) - // result: (BTSQconstmodify 
[ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2)) { - break - } - v.reset(OpAMD64BTSQconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(sym) - v.AddArg2(base, mem) - return true - } - // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) - // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) - for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - mem := v_1 - if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTSQconstmodify) - v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg2(base, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (BTSQmodify [off1+off2] {sym} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { - break - } - v.reset(OpAMD64BTSQmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(base, val, mem) - return true - } - // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) - // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_0.AuxInt) - sym2 := auxToSym(v_0.Aux) - base := v_0.Args[0] - val := v_1 - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64BTSQmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(base, val, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -6958,7 +6344,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { } // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) // cond: l.Uses == 1 && clobber(l) - // result: @l.Block (CMPBconstload {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) for { c := auxIntToInt8(v.AuxInt) l := v_0 @@ -6975,7 +6361,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -7080,8 +6466,7 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { return true } // match: (CMPBload {sym} [off] ptr 
(MOVLconst [c]) mem) - // cond: validValAndOff(int64(int8(c)),int64(off)) - // result: (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem) + // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -7091,11 +6476,8 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(int64(int8(c)), int64(off))) { - break - } v.reset(OpAMD64CMPBconstload) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -7359,7 +6741,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { } // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) // cond: l.Uses == 1 && clobber(l) - // result: @l.Block (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem) + // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) for { c := auxIntToInt32(v.AuxInt) l := v_0 @@ -7376,7 +6758,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -7481,8 +6863,7 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { return true } // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) - // cond: validValAndOff(int64(c),int64(off)) - // result: (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem) + // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -7492,11 +6873,8 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(int64(c), int64(off))) { - break - } v.reset(OpAMD64CMPLconstload) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -7929,7 +7307,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { } // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c]) // cond: l.Uses == 1 && clobber(l) - // result: @l.Block (CMPQconstload {sym} [makeValAndOff32(c,off)] ptr mem) + // result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem) for { c := auxIntToInt32(v.AuxInt) l := v_0 @@ -7946,7 +7324,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -8051,8 +7429,8 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { return true } // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) - // cond: validValAndOff(c,int64(off)) - // result: (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem) + // cond: validVal(c) + // result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -8062,11 +7440,11 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(validValAndOff(c, int64(off))) { + if !(validVal(c)) { break } v.reset(OpAMD64CMPQconstload) - v.AuxInt = 
valAndOffToAuxInt(makeValAndOff64(c, int64(off))) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -8315,7 +7693,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { } // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) // cond: l.Uses == 1 && clobber(l) - // result: @l.Block (CMPWconstload {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) for { c := auxIntToInt16(v.AuxInt) l := v_0 @@ -8332,7 +7710,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -8437,8 +7815,7 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { return true } // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) - // cond: validValAndOff(int64(int16(c)),int64(off)) - // result: (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem) + // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -8448,11 +7825,8 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { } c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(int64(int16(c)), int64(off))) { - break - } v.reset(OpAMD64CMPWconstload) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -10596,7 +9970,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { return true } // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) - // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) + // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -10607,13 +9981,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { c := auxIntToInt32(v_1.AuxInt) mem := v_2 v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) - // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) + // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -10624,7 +9998,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mem := v_2 v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -11411,6 +10785,56 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.AddArg3(p0, w0, mem) return true } + // match: (MOVBstore [7] {s} p1 (SHRQconst [56] w) x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w) x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w) x3:(MOVBstore [0] {s} p1 w mem)))) + // cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3) + // result: (MOVQstore {s} p1 w mem) + for { + if auxIntToInt32(v.AuxInt) != 7 { + break + } + s := 
auxToSym(v.Aux) + p1 := v_0 + if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 { + break + } + w := v_1.Args[0] + x1 := v_2 + if x1.Op != OpAMD64MOVWstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s { + break + } + _ = x1.Args[2] + if p1 != x1.Args[0] { + break + } + x1_1 := x1.Args[1] + if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] { + break + } + x2 := x1.Args[2] + if x2.Op != OpAMD64MOVLstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s { + break + } + _ = x2.Args[2] + if p1 != x2.Args[0] { + break + } + x2_1 := x2.Args[1] + if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] { + break + } + x3 := x2.Args[2] + if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s { + break + } + mem := x3.Args[2] + if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) { + break + } + v.reset(OpAMD64MOVQstore) + v.Aux = symToAux(s) + v.AddArg3(p1, w, mem) + return true + } // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2) // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) @@ -11547,7 +10971,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - // result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) + // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -11565,14 +10989,14 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { break } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - // result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) + // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) for { a := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -11590,7 +11014,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { break } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true @@ -12204,7 +11628,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { return true } // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) - // result: (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -12215,13 +11639,13 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { c := auxIntToInt32(v_1.AuxInt) mem := v_2 v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) 
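
Editor's aside: the MOVBstore rule added above (offsets 7, 5, 1, 0 feeding SHRQconst 56/40/8 of the same w) closes a gap in store coalescing. The existing pairwise rules merge runs of byte stores into 2- and 4-byte stores, which can leave exactly a 1+4+2+1 shape behind; this rule collapses that remainder into one MOVQstore. A minimal sketch of Go source that lowers to this pattern, assuming the usual byte-store merging has already run (the function name is illustrative, not from this CL — it mirrors a hand-written little-endian put):

func put64(b []byte, w uint64) {
	_ = b[7] // one bounds check up front so each store below is unconditional
	b[0] = byte(w)
	b[1] = byte(w >> 8)
	b[2] = byte(w >> 16)
	b[3] = byte(w >> 24)
	b[4] = byte(w >> 32)
	b[5] = byte(w >> 40)
	b[6] = byte(w >> 48)
	b[7] = byte(w >> 56)
}

Earlier rules fold b[1..4] into a 4-byte store of w>>8 and b[5..6] into a 2-byte store of w>>40, leaving MOVBstore [0], MOVLstore [1], MOVWstore [5], MOVBstore [7] — precisely the pattern the new rule rewrites to a single 8-byte MOVQstore.
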
v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) - // result: (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -12232,7 +11656,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mem := v_2 v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -12709,87 +12133,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } break } - // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) - // result: (BTCLmodify [off] {sym} ptr x mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - y := v_1 - if y.Op != OpAMD64BTCL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { - break - } - v.reset(OpAMD64BTCLmodify) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, x, mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) - // result: (BTRLmodify [off] {sym} ptr x mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - y := v_1 - if y.Op != OpAMD64BTRL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { - break - } - v.reset(OpAMD64BTRLmodify) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, x, mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) - // result: (BTSLmodify [off] {sym} ptr x mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - y := v_1 - if y.Op != OpAMD64BTSL { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { - break - } - v.reset(OpAMD64BTSLmodify) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, x, mem) - return true - } // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (ADDLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -12805,18 +12151,18 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), 
int64(off)) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { break } v.reset(OpAMD64ADDLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (ANDLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -12832,18 +12178,18 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { break } v.reset(OpAMD64ANDLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (ORLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -12859,18 +12205,18 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { break } v.reset(OpAMD64ORLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (XORLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -12886,92 +12232,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { break } v.reset(OpAMD64XORLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] 
l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (BTCLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - a := v_1 - if a.Op != OpAMD64BTCLconst { - break - } - c := auxIntToInt8(a.AuxInt) - l := a.Args[0] - if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { - break - } - v.reset(OpAMD64BTCLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (BTRLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - a := v_1 - if a.Op != OpAMD64BTRLconst { - break - } - c := auxIntToInt8(a.AuxInt) - l := a.Args[0] - if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { - break - } - v.reset(OpAMD64BTRLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (BTSLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - a := v_1 - if a.Op != OpAMD64BTSLconst { - break - } - c := auxIntToInt8(a.AuxInt) - l := a.Args[0] - if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { - break - } - v.reset(OpAMD64BTSLconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -13045,7 +12310,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { } // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) - // result: (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem) + // result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -13063,16 +12328,16 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(a.Off32()) + v.AuxInt = int32ToAuxInt(a.Off()) v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(a.Val()&0xffffffff | c.Val()<<32) 
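
Editor's aside: most of the mechanical churn in this file — makeValAndOff32/makeValAndOff64 collapsing into makeValAndOff, Off32()/Val32() becoming Off()/Val(), and the validValAndOff guards disappearing — follows from ValAndOff's two halves now being int32 by construction. A rough sketch of the encoding as the surrounding accessors imply it (the exact layout is an assumption, not quoted from this CL):

// ValAndOff packs a 32-bit value and a 32-bit offset into one int64
// aux field: value in the high 32 bits, offset in the low 32 bits.
type ValAndOff int64

func makeValAndOff(val, off int32) ValAndOff {
	return ValAndOff(int64(val)<<32 + int64(uint32(off)))
}

func (x ValAndOff) Val() int32   { return int32(int64(x) >> 32) } // assumed layout
func (x ValAndOff) Val64() int64 { return int64(x) >> 32 }        // sign-extended value
func (x ValAndOff) Off() int32   { return int32(x) }

Because both halves are guaranteed to fit in 32 bits, conditions like validValAndOff(int64(c), int64(off)) are vacuous and drop out of the rules; only genuinely 64-bit inputs still need a range check, which is why the MOVQconst cases keep a validVal(c) guard. Val64() exists for the spots just above that rebuild a 64-bit constant, where a.Val64()&0xffffffff | c.Val64()<<32 needs the low half zero-extended and the high half shifted in.
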
+ v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32) v.AddArg3(p, v0, mem) return true } // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) - // result: (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem) + // result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem) for { a := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -13090,10 +12355,10 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(a.Off32()) + v.AuxInt = int32ToAuxInt(a.Off()) v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(a.Val()&0xffffffff | c.Val()<<32) + v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32) v.AddArg3(p, v0, mem) return true } @@ -13549,7 +12814,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) // cond: validVal(c) - // result: (MOVQstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -13563,7 +12828,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -13890,87 +13155,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } break } - // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) - // result: (BTCQmodify [off] {sym} ptr x mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - y := v_1 - if y.Op != OpAMD64BTCQ { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { - break - } - v.reset(OpAMD64BTCQmodify) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, x, mem) - return true - } - // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) - // result: (BTRQmodify [off] {sym} ptr x mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - y := v_1 - if y.Op != OpAMD64BTRQ { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { - break - } - v.reset(OpAMD64BTRQmodify) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, x, mem) - return true - } - // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) - // result: (BTSQmodify [off] {sym} ptr x mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - y := v_1 - if y.Op != OpAMD64BTSQ { - break - } - x := y.Args[1] - l := y.Args[0] - if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - 
if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { - break - } - v.reset(OpAMD64BTSQmodify) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(ptr, x, mem) - return true - } // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (ADDQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -13986,18 +13173,18 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { break } v.reset(OpAMD64ADDQconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (ANDQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -14013,18 +13200,18 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { break } v.reset(OpAMD64ANDQconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (ORQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -14040,18 +13227,18 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { break } v.reset(OpAMD64ORQconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && 
a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (XORQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -14067,92 +13254,11 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { break } v.reset(OpAMD64XORQconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (BTCQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - a := v_1 - if a.Op != OpAMD64BTCQconst { - break - } - c := auxIntToInt8(a.AuxInt) - l := a.Args[0] - if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { - break - } - v.reset(OpAMD64BTCQconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (BTRQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - a := v_1 - if a.Op != OpAMD64BTRQconst { - break - } - c := auxIntToInt8(a.AuxInt) - l := a.Args[0] - if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { - break - } - v.reset(OpAMD64BTRQconstmodify) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) - // result: (BTSQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) - for { - off := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - ptr := v_0 - a := v_1 - if a.Op != OpAMD64BTSQconst { - break - } - c := auxIntToInt8(a.AuxInt) - l := a.Args[0] - if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { - break - } - mem := l.Args[1] - ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { - break - } - v.reset(OpAMD64BTSQconstmodify) - v.AuxInt = 
valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -14226,7 +13332,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { } // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) // cond: config.useSSE && x.Uses == 1 && c2.Off() + 8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x) - // result: (MOVOstorezero [c2.Off32()] {s} p mem) + // result: (MOVOstorezero [c2.Off()] {s} p mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -14244,7 +13350,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { break } v.reset(OpAMD64MOVOstorezero) - v.AuxInt = int32ToAuxInt(c2.Off32()) + v.AuxInt = int32ToAuxInt(c2.Off()) v.Aux = symToAux(s) v.AddArg2(p, mem) return true @@ -15031,7 +14137,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { return true } // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) - // result: (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) + // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -15042,13 +14148,13 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { c := auxIntToInt32(v_1.AuxInt) mem := v_2 v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) - // result: (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) + // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -15059,7 +14165,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mem := v_2 v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -15441,7 +14547,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) - // result: (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) + // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -15459,14 +14565,14 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { break } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xffff|c.Val()<<16, a.Off())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) - // result: (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) + // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) for { a := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -15484,7 +14590,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { break } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = 
valAndOffToAuxInt(makeValAndOff64(a.Val()&0xffff|c.Val()<<16, a.Off())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true @@ -18701,6 +17807,54 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } break } + // match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits))) + // result: (SHRDQ lo hi bits) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHRQ { + continue + } + bits := v_0.Args[1] + lo := v_0.Args[0] + if v_1.Op != OpAMD64SHLQ { + continue + } + _ = v_1.Args[1] + hi := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { + continue + } + v.reset(OpAMD64SHRDQ) + v.AddArg3(lo, hi, bits) + return true + } + break + } + // match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits))) + // result: (SHLDQ lo hi bits) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLQ { + continue + } + bits := v_0.Args[1] + lo := v_0.Args[0] + if v_1.Op != OpAMD64SHRQ { + continue + } + _ = v_1.Args[1] + hi := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { + continue + } + v.reset(OpAMD64SHLDQ) + v.AddArg3(lo, hi, bits) + return true + } + break + } // match: (ORQ (MOVQconst [c]) (MOVQconst [d])) // result: (MOVQconst [c|d]) for { @@ -27046,8 +26200,8 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { break } // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l) - // result: @l.Block (CMPBconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem) + // cond: l == l2 && l.Uses == 2 && clobber(l) + // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { l := v_0 @@ -27059,13 +26213,13 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { mem := l.Args[1] ptr := l.Args[0] l2 := v_1 - if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) { + if !(l == l2 && l.Uses == 2 && clobber(l)) { continue } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off))) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -27114,8 +26268,8 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { break } // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l) - // result: @l.Block (CMPLconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem) + // cond: l == l2 && l.Uses == 2 && clobber(l) + // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { l := v_0 @@ -27127,19 +26281,46 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { mem := l.Args[1] ptr := l.Args[0] l2 := v_1 - if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) { + if !(l == l2 && l.Uses == 2 && clobber(l)) { continue } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off))) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } break } + // match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a) + // cond: a.Uses == 2 && a.Block == v.Block && clobber(a) + // 
result: (TESTL (MOVLload [off] {sym} ptr mem) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if a.Op != OpAMD64ANDLload { + continue + } + off := auxIntToInt32(a.AuxInt) + sym := auxToSym(a.Aux) + mem := a.Args[2] + x := a.Args[0] + ptr := a.Args[1] + if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) { + continue + } + v.reset(OpAMD64TESTL) + v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } + break + } return false } func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool { @@ -27219,8 +26400,8 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { break } // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l) - // result: @l.Block (CMPQconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem) + // cond: l == l2 && l.Uses == 2 && clobber(l) + // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { l := v_0 @@ -27232,19 +26413,46 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { mem := l.Args[1] ptr := l.Args[0] l2 := v_1 - if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) { + if !(l == l2 && l.Uses == 2 && clobber(l)) { continue } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off))) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } break } + // match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a) + // cond: a.Uses == 2 && a.Block == v.Block && clobber(a) + // result: (TESTQ (MOVQload [off] {sym} ptr mem) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if a.Op != OpAMD64ANDQload { + continue + } + off := auxIntToInt32(a.AuxInt) + sym := auxToSym(a.Aux) + mem := a.Args[2] + x := a.Args[0] + ptr := a.Args[1] + if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) { + continue + } + v.reset(OpAMD64TESTQ) + v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } + break + } return false } func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool { @@ -27332,8 +26540,8 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool { break } // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l) - // result: @l.Block (CMPWconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem) + // cond: l == l2 && l.Uses == 2 && clobber(l) + // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { l := v_0 @@ -27345,13 +26553,13 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool { mem := l.Args[1] ptr := l.Args[0] l2 := v_1 - if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) { + if !(l == l2 && l.Uses == 2 && clobber(l)) { continue } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off))) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off)) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true @@ -30129,11 +29337,11 @@ func rewriteValueAMD64_OpFloor(v *Value) 
bool { func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] // match: (GetG mem) - // cond: !base.Flag.ABIWrap + // cond: !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal) // result: (LoweredGetG mem) for { mem := v_0 - if !(!base.Flag.ABIWrap) { + if !(!(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)) { break } v.reset(OpAMD64LoweredGetG) @@ -33952,7 +33160,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { return true } // match: (Zero [1] destptr mem) - // result: (MOVBstoreconst [makeValAndOff32(0,0)] destptr mem) + // result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem) for { if auxIntToInt64(v.AuxInt) != 1 { break @@ -33960,12 +33168,12 @@ func rewriteValueAMD64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v.AddArg2(destptr, mem) return true } // match: (Zero [2] destptr mem) - // result: (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem) + // result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem) for { if auxIntToInt64(v.AuxInt) != 2 { break @@ -33973,12 +33181,12 @@ func rewriteValueAMD64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v.AddArg2(destptr, mem) return true } // match: (Zero [4] destptr mem) - // result: (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem) + // result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem) for { if auxIntToInt64(v.AuxInt) != 4 { break @@ -33986,12 +33194,12 @@ func rewriteValueAMD64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v.AddArg2(destptr, mem) return true } // match: (Zero [8] destptr mem) - // result: (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem) + // result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem) for { if auxIntToInt64(v.AuxInt) != 8 { break @@ -33999,12 +33207,12 @@ func rewriteValueAMD64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v.AddArg2(destptr, mem) return true } // match: (Zero [3] destptr mem) - // result: (MOVBstoreconst [makeValAndOff32(0,2)] destptr (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 3 { break @@ -34012,15 +33220,15 @@ func rewriteValueAMD64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 2)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2)) v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [5] destptr mem) - // result: (MOVBstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 5 { break @@ -34028,15 +33236,15 @@ func 
rewriteValueAMD64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [6] destptr mem) - // result: (MOVWstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 6 { break @@ -34044,15 +33252,15 @@ func rewriteValueAMD64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [7] destptr mem) - // result: (MOVLstoreconst [makeValAndOff32(0,3)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 7 { break @@ -34060,16 +33268,16 @@ func rewriteValueAMD64_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 3)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3)) v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [s] destptr mem) // cond: s%8 != 0 && s > 8 && !config.useSSE - // result: (Zero [s-s%8] (OffPtr destptr [s%8]) (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (Zero [s-s%8] (OffPtr destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) for { s := auxIntToInt64(v.AuxInt) destptr := v_0 @@ -34083,14 +33291,14 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0.AuxInt = int64ToAuxInt(s % 8) v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v1.AddArg2(destptr, mem) v.AddArg2(v0, v1) return true } // match: (Zero [16] destptr mem) // cond: !config.useSSE - // result: (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 16 { break @@ -34101,16 +33309,16 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true } // match: (Zero [24] destptr mem) // cond: !config.useSSE - // result: (MOVQstoreconst [makeValAndOff32(0,16)] 
destptr (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))) + // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))) for { if auxIntToInt64(v.AuxInt) != 24 { break @@ -34121,11 +33329,11 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 16)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16)) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v1.AddArg2(destptr, mem) v0.AddArg2(destptr, v1) v.AddArg2(destptr, v0) @@ -34133,7 +33341,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } // match: (Zero [32] destptr mem) // cond: !config.useSSE - // result: (MOVQstoreconst [makeValAndOff32(0,24)] destptr (MOVQstoreconst [makeValAndOff32(0,16)] destptr (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)))) + // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))) for { if auxIntToInt64(v.AuxInt) != 32 { break @@ -34144,13 +33352,13 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 24)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24)) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 16)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16)) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8)) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v2.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v2.AddArg2(destptr, mem) v1.AddArg2(destptr, v2) v0.AddArg2(destptr, v1) @@ -34159,7 +33367,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } // match: (Zero [s] destptr mem) // cond: s > 8 && s < 16 && config.useSSE - // result: (MOVQstoreconst [makeValAndOff32(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) for { s := auxIntToInt64(v.AuxInt) destptr := v_0 @@ -34168,9 +33376,9 @@ func rewriteValueAMD64_OpZero(v *Value) bool { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, int32(s-8))) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8))) v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v0.AddArg2(destptr, mem) v.AddArg2(destptr, v0) return true @@ -34197,7 +33405,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } // match: (Zero [s] destptr mem) // cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE - // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)) + // result: (Zero [s-s%16] (OffPtr 
destptr [s%16]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) for { s := auxIntToInt64(v.AuxInt) destptr := v_0 @@ -34211,7 +33419,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool { v0.AuxInt = int64ToAuxInt(s % 16) v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) - v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0)) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) v1.AddArg2(destptr, mem) v.AddArg2(v0, v1) return true diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go index 65bfec0f684..ae50aaa466f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go @@ -59,7 +59,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPBconstload {sym} [vo] ptr mem) // cond: vo.Val() == 0 - // result: (TESTB x:(MOVBload {sym} [vo.Off32()] ptr mem) x) + // result: (TESTB x:(MOVBload {sym} [vo.Off()] ptr mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -70,7 +70,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool { } v.reset(OpAMD64TESTB) x := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg2(ptr, mem) v.AddArg2(x, x) @@ -78,7 +78,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool { } // match: (CMPBconstload {sym} [vo] ptr mem) // cond: vo.Val() != 0 - // result: (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()]) + // result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -90,7 +90,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool { v.reset(OpAMD64CMPBconst) v.AuxInt = int8ToAuxInt(vo.Val8()) v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) v.AddArg(v0) @@ -106,7 +106,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem) // cond: vo.Val() == 0 - // result: (TESTB x:(MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) x) + // result: (TESTB x:(MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -118,7 +118,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool { } v.reset(OpAMD64TESTB) x := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg3(ptr, idx, mem) v.AddArg2(x, x) @@ -126,7 +126,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool { } // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem) // cond: vo.Val() != 0 - // result: (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()]) + // result: (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -139,7 +139,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool { v.reset(OpAMD64CMPBconst) v.AuxInt = int8ToAuxInt(vo.Val8()) v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) 
v0.AddArg3(ptr, idx, mem) v.AddArg(v0) @@ -202,7 +202,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPLconstload {sym} [vo] ptr mem) // cond: vo.Val() == 0 - // result: (TESTL x:(MOVLload {sym} [vo.Off32()] ptr mem) x) + // result: (TESTL x:(MOVLload {sym} [vo.Off()] ptr mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -213,7 +213,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool { } v.reset(OpAMD64TESTL) x := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg2(ptr, mem) v.AddArg2(x, x) @@ -221,7 +221,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool { } // match: (CMPLconstload {sym} [vo] ptr mem) // cond: vo.Val() != 0 - // result: (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()]) + // result: (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -231,9 +231,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool { break } v.reset(OpAMD64CMPLconst) - v.AuxInt = int32ToAuxInt(vo.Val32()) + v.AuxInt = int32ToAuxInt(vo.Val()) v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) v.AddArg(v0) @@ -249,7 +249,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem) // cond: vo.Val() == 0 - // result: (TESTL x:(MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) x) + // result: (TESTL x:(MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -261,7 +261,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool { } v.reset(OpAMD64TESTL) x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg3(ptr, idx, mem) v.AddArg2(x, x) @@ -269,7 +269,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool { } // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem) // cond: vo.Val() != 0 - // result: (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()]) + // result: (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -280,9 +280,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool { break } v.reset(OpAMD64CMPLconst) - v.AuxInt = int32ToAuxInt(vo.Val32()) + v.AuxInt = int32ToAuxInt(vo.Val()) v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg3(ptr, idx, mem) v.AddArg(v0) @@ -298,7 +298,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem) // cond: vo.Val() == 0 - // result: (TESTL x:(MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) x) + // result: (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -310,7 +310,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool { } v.reset(OpAMD64TESTL) x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, 
typ.UInt32) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg3(ptr, idx, mem) v.AddArg2(x, x) @@ -318,7 +318,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool { } // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem) // cond: vo.Val() != 0 - // result: (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()]) + // result: (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -329,9 +329,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool { break } v.reset(OpAMD64CMPLconst) - v.AuxInt = int32ToAuxInt(vo.Val32()) + v.AuxInt = int32ToAuxInt(vo.Val()) v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg3(ptr, idx, mem) v.AddArg(v0) @@ -419,7 +419,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPQconstload {sym} [vo] ptr mem) // cond: vo.Val() == 0 - // result: (TESTQ x:(MOVQload {sym} [vo.Off32()] ptr mem) x) + // result: (TESTQ x:(MOVQload {sym} [vo.Off()] ptr mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -430,7 +430,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool { } v.reset(OpAMD64TESTQ) x := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg2(ptr, mem) v.AddArg2(x, x) @@ -438,7 +438,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool { } // match: (CMPQconstload {sym} [vo] ptr mem) // cond: vo.Val() != 0 - // result: (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()]) + // result: (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -448,9 +448,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool { break } v.reset(OpAMD64CMPQconst) - v.AuxInt = int32ToAuxInt(vo.Val32()) + v.AuxInt = int32ToAuxInt(vo.Val()) v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) v.AddArg(v0) @@ -466,7 +466,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem) // cond: vo.Val() == 0 - // result: (TESTQ x:(MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) x) + // result: (TESTQ x:(MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -478,7 +478,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool { } v.reset(OpAMD64TESTQ) x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg3(ptr, idx, mem) v.AddArg2(x, x) @@ -486,7 +486,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool { } // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem) // cond: vo.Val() != 0 - // result: (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()]) + // result: (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -497,9 +497,9 @@ func 
rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool { break } v.reset(OpAMD64CMPQconst) - v.AuxInt = int32ToAuxInt(vo.Val32()) + v.AuxInt = int32ToAuxInt(vo.Val()) v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg3(ptr, idx, mem) v.AddArg(v0) @@ -515,7 +515,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem) // cond: vo.Val() == 0 - // result: (TESTQ x:(MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) x) + // result: (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -527,7 +527,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool { } v.reset(OpAMD64TESTQ) x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg3(ptr, idx, mem) v.AddArg2(x, x) @@ -535,7 +535,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool { } // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem) // cond: vo.Val() != 0 - // result: (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()]) + // result: (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -546,9 +546,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool { break } v.reset(OpAMD64CMPQconst) - v.AuxInt = int32ToAuxInt(vo.Val32()) + v.AuxInt = int32ToAuxInt(vo.Val()) v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg3(ptr, idx, mem) v.AddArg(v0) @@ -636,7 +636,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPWconstload {sym} [vo] ptr mem) // cond: vo.Val() == 0 - // result: (TESTW x:(MOVWload {sym} [vo.Off32()] ptr mem) x) + // result: (TESTW x:(MOVWload {sym} [vo.Off()] ptr mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -647,7 +647,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool { } v.reset(OpAMD64TESTW) x := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg2(ptr, mem) v.AddArg2(x, x) @@ -655,7 +655,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool { } // match: (CMPWconstload {sym} [vo] ptr mem) // cond: vo.Val() != 0 - // result: (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()]) + // result: (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -667,7 +667,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool { v.reset(OpAMD64CMPWconst) v.AuxInt = int16ToAuxInt(vo.Val16()) v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) v.AddArg(v0) @@ -683,7 +683,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem) // cond: vo.Val() == 0 - // result: (TESTW x:(MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) x) + 
// result: (TESTW x:(MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -695,7 +695,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool { } v.reset(OpAMD64TESTW) x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg3(ptr, idx, mem) v.AddArg2(x, x) @@ -703,7 +703,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool { } // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem) // cond: vo.Val() != 0 - // result: (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()]) + // result: (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -716,7 +716,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool { v.reset(OpAMD64CMPWconst) v.AuxInt = int16ToAuxInt(vo.Val16()) v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg3(ptr, idx, mem) v.AddArg(v0) @@ -732,7 +732,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool { typ := &b.Func.Config.Types // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem) // cond: vo.Val() == 0 - // result: (TESTW x:(MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) x) + // result: (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -744,7 +744,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool { } v.reset(OpAMD64TESTW) x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16) - x.AuxInt = int32ToAuxInt(vo.Off32()) + x.AuxInt = int32ToAuxInt(vo.Off()) x.Aux = symToAux(sym) x.AddArg3(ptr, idx, mem) v.AddArg2(x, x) @@ -752,7 +752,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool { } // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem) // cond: vo.Val() != 0 - // result: (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()]) + // result: (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()]) for { vo := auxIntToValAndOff(v.AuxInt) sym := auxToSym(v.Aux) @@ -765,7 +765,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool { v.reset(OpAMD64CMPWconst) v.AuxInt = int16ToAuxInt(vo.Val16()) v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16) - v0.AuxInt = int32ToAuxInt(vo.Off32()) + v0.AuxInt = int32ToAuxInt(vo.Off()) v0.Aux = symToAux(sym) v0.AddArg3(ptr, idx, mem) v.AddArg(v0) @@ -847,7 +847,5 @@ func rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v *Value) bool { } } func rewriteBlockAMD64splitload(b *Block) bool { - switch b.Kind { - } return false } diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index c958aae2c4a..febb5566e33 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -3,7 +3,7 @@ package ssa -import "cmd/internal/objabi" +import "internal/buildcfg" import "cmd/compile/internal/types" func rewriteValueARM(v *Value) bool { @@ -202,6 +202,8 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMMOVWloadshiftRA(v) case OpARMMOVWloadshiftRL: return rewriteValueARM_OpARMMOVWloadshiftRL(v) + case OpARMMOVWnop: + return rewriteValueARM_OpARMMOVWnop(v) case OpARMMOVWreg: return rewriteValueARM_OpARMMOVWreg(v) case 
OpARMMOVWstore: @@ -821,6 +823,9 @@ func rewriteValueARM(v *Value) bool { case OpSqrt: v.Op = OpARMSQRTD return true + case OpSqrt32: + v.Op = OpARMSQRTF + return true case OpStaticCall: v.Op = OpARMCALLstatic return true @@ -1470,7 +1475,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDD a (MULD x y)) - // cond: a.Uses == 1 && objabi.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM >= 6 // result: (MULAD a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1480,7 +1485,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM >= 6) { continue } v.reset(OpARMMULAD) @@ -1490,7 +1495,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { break } // match: (ADDD a (NMULD x y)) - // cond: a.Uses == 1 && objabi.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM >= 6 // result: (MULSD a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1500,7 +1505,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM >= 6) { continue } v.reset(OpARMMULSD) @@ -1515,7 +1520,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDF a (MULF x y)) - // cond: a.Uses == 1 && objabi.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM >= 6 // result: (MULAF a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1525,7 +1530,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM >= 6) { continue } v.reset(OpARMMULAF) @@ -1535,7 +1540,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { break } // match: (ADDF a (NMULF x y)) - // cond: a.Uses == 1 && objabi.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM >= 6 // result: (MULSF a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1545,7 +1550,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM >= 6) { continue } v.reset(OpARMMULSF) @@ -1941,12 +1946,12 @@ func rewriteValueARM_OpARMADDconst(v *Value) bool { return true } // match: (ADDconst [c] x) - // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff + // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff // result: (SUBconst [-c] x) for { c := auxIntToInt32(v.AuxInt) x := v_0 - if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { + if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { break } v.reset(OpARMSUBconst) @@ -2077,7 +2082,7 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool { return true } // match: (ADDshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) - // cond: objabi.GOARM>=6 + // cond: buildcfg.GOARM>=6 // result: (REV16 x) for { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { @@ -2088,7 +2093,7 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool { break } x := v_0_0.Args[0] - if x != v_1 || !(objabi.GOARM >= 6) { + if x != v_1 || !(buildcfg.GOARM >= 6) { break } v.reset(OpARMREV16) @@ -2533,12 
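// The new OpSqrt32 lowering above (SQRTF here, FSQRTS/SQRTF on the ports
// further down) is the generic single-precision square root. The Go-level
// pattern that plausibly produces it is the float32 round-trip through
// math.Sqrt, which the lowering can turn into one hardware instruction
// instead of a widen/sqrt/narrow sequence -- a sketch, not a guarantee of
// the exact source pattern:
package main

import (
	"fmt"
	"math"
)

func sqrt32(x float32) float32 {
	return float32(math.Sqrt(float64(x)))
}

func main() {
	fmt.Println(sqrt32(2)) // 1.4142135
}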
+2538,12 @@ func rewriteValueARM_OpARMANDconst(v *Value) bool { return true } // match: (ANDconst [c] x) - // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff + // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff // result: (BICconst [int32(^uint32(c))] x) for { c := auxIntToInt32(v.AuxInt) x := v_0 - if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { + if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { break } v.reset(OpARMBICconst) @@ -3028,12 +3033,12 @@ func rewriteValueARM_OpARMBICconst(v *Value) bool { return true } // match: (BICconst [c] x) - // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff + // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff // result: (ANDconst [int32(^uint32(c))] x) for { c := auxIntToInt32(v.AuxInt) x := v_0 - if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { + if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { break } v.reset(OpARMANDconst) @@ -6501,6 +6506,21 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool { } return false } +func rewriteValueARM_OpARMMOVWnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWnop (MOVWconst [c])) + // result: (MOVWconst [c]) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c) + return true + } + return false +} func rewriteValueARM_OpARMMOVWreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVWreg x) @@ -7521,7 +7541,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MULD (NEGD x) y) - // cond: objabi.GOARM >= 6 + // cond: buildcfg.GOARM >= 6 // result: (NMULD x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -7530,7 +7550,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool { } x := v_0.Args[0] y := v_1 - if !(objabi.GOARM >= 6) { + if !(buildcfg.GOARM >= 6) { continue } v.reset(OpARMNMULD) @@ -7545,7 +7565,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MULF (NEGF x) y) - // cond: objabi.GOARM >= 6 + // cond: buildcfg.GOARM >= 6 // result: (NMULF x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -7554,7 +7574,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool { } x := v_0.Args[0] y := v_1 - if !(objabi.GOARM >= 6) { + if !(buildcfg.GOARM >= 6) { continue } v.reset(OpARMNMULF) @@ -8166,7 +8186,7 @@ func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool { func rewriteValueARM_OpARMNEGD(v *Value) bool { v_0 := v.Args[0] // match: (NEGD (MULD x y)) - // cond: objabi.GOARM >= 6 + // cond: buildcfg.GOARM >= 6 // result: (NMULD x y) for { if v_0.Op != OpARMMULD { @@ -8174,7 +8194,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool { } y := v_0.Args[1] x := v_0.Args[0] - if !(objabi.GOARM >= 6) { + if !(buildcfg.GOARM >= 6) { break } v.reset(OpARMNMULD) @@ -8186,7 +8206,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool { func rewriteValueARM_OpARMNEGF(v *Value) bool { v_0 := v.Args[0] // match: (NEGF (MULF x y)) - // cond: objabi.GOARM >= 6 + // cond: buildcfg.GOARM >= 6 // result: (NMULF x y) for { if v_0.Op != OpARMMULF { @@ -8194,7 +8214,7 @@ func rewriteValueARM_OpARMNEGF(v *Value) bool { } y := v_0.Args[1] x := v_0.Args[0] - 
if !(objabi.GOARM >= 6) { + if !(buildcfg.GOARM >= 6) { break } v.reset(OpARMNMULF) @@ -8518,7 +8538,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { return true } // match: (ORshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) - // cond: objabi.GOARM>=6 + // cond: buildcfg.GOARM>=6 // result: (REV16 x) for { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { @@ -8529,7 +8549,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { break } x := v_0_0.Args[0] - if x != v_1 || !(objabi.GOARM >= 6) { + if x != v_1 || !(buildcfg.GOARM >= 6) { break } v.reset(OpARMREV16) @@ -8993,7 +9013,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { return true } // match: (RSB (MUL x y) a) - // cond: objabi.GOARM == 7 + // cond: buildcfg.GOARM == 7 // result: (MULS x y a) for { if v_0.Op != OpARMMUL { @@ -9002,7 +9022,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] a := v_1 - if !(objabi.GOARM == 7) { + if !(buildcfg.GOARM == 7) { break } v.reset(OpARMMULS) @@ -10429,7 +10449,7 @@ func rewriteValueARM_OpARMSRAconst(v *Value) bool { return true } // match: (SRAconst (SLLconst x [c]) [d]) - // cond: objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 + // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 // result: (BFX [(d-c)|(32-d)<<8] x) for { d := auxIntToInt32(v.AuxInt) @@ -10438,7 +10458,7 @@ func rewriteValueARM_OpARMSRAconst(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(objabi.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { + if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { break } v.reset(OpARMBFX) @@ -10481,7 +10501,7 @@ func rewriteValueARM_OpARMSRLconst(v *Value) bool { return true } // match: (SRLconst (SLLconst x [c]) [d]) - // cond: objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 + // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 // result: (BFXU [(d-c)|(32-d)<<8] x) for { d := auxIntToInt32(v.AuxInt) @@ -10490,7 +10510,7 @@ func rewriteValueARM_OpARMSRLconst(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(objabi.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { + if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { break } v.reset(OpARMBFXU) @@ -10703,7 +10723,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { return true } // match: (SUB a (MUL x y)) - // cond: objabi.GOARM == 7 + // cond: buildcfg.GOARM == 7 // result: (MULS x y a) for { a := v_0 @@ -10712,7 +10732,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(objabi.GOARM == 7) { + if !(buildcfg.GOARM == 7) { break } v.reset(OpARMMULS) @@ -10725,7 +10745,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SUBD a (MULD x y)) - // cond: a.Uses == 1 && objabi.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM >= 6 // result: (MULSD a x y) for { a := v_0 @@ -10734,7 +10754,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM >= 6) { break } v.reset(OpARMMULSD) @@ -10742,7 +10762,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { return true } // match: (SUBD a (NMULD x y)) - // cond: a.Uses == 1 && objabi.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM >= 6 // result: (MULAD a x y) for { a := v_0 @@ -10751,7 +10771,7 @@ func 
rewriteValueARM_OpARMSUBD(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM >= 6) { break } v.reset(OpARMMULAD) @@ -10764,7 +10784,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SUBF a (MULF x y)) - // cond: a.Uses == 1 && objabi.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM >= 6 // result: (MULSF a x y) for { a := v_0 @@ -10773,7 +10793,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM >= 6) { break } v.reset(OpARMMULSF) @@ -10781,7 +10801,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { return true } // match: (SUBF a (NMULF x y)) - // cond: a.Uses == 1 && objabi.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM >= 6 // result: (MULAF a x y) for { a := v_0 @@ -10790,7 +10810,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && objabi.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM >= 6) { break } v.reset(OpARMMULAF) @@ -11244,12 +11264,12 @@ func rewriteValueARM_OpARMSUBconst(v *Value) bool { return true } // match: (SUBconst [c] x) - // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff + // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff // result: (ADDconst [-c] x) for { c := auxIntToInt32(v.AuxInt) x := v_0 - if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { + if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { break } v.reset(OpARMADDconst) @@ -12557,7 +12577,7 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool { return true } // match: (XORshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) - // cond: objabi.GOARM>=6 + // cond: buildcfg.GOARM>=6 // result: (REV16 x) for { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { @@ -12568,7 +12588,7 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool { break } x := v_0_0.Args[0] - if x != v_1 || !(objabi.GOARM >= 6) { + if x != v_1 || !(buildcfg.GOARM >= 6) { break } v.reset(OpARMREV16) @@ -12919,12 +12939,12 @@ func rewriteValueARM_OpBswap32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Bswap32 x) - // cond: objabi.GOARM==5 + // cond: buildcfg.GOARM==5 // result: (XOR (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) (SRRconst x [8])) for { t := v.Type x := v_0 - if !(objabi.GOARM == 5) { + if !(buildcfg.GOARM == 5) { break } v.reset(OpARMXOR) @@ -12947,11 +12967,11 @@ func rewriteValueARM_OpBswap32(v *Value) bool { return true } // match: (Bswap32 x) - // cond: objabi.GOARM>=6 + // cond: buildcfg.GOARM>=6 // result: (REV x) for { x := v_0 - if !(objabi.GOARM >= 6) { + if !(buildcfg.GOARM >= 6) { break } v.reset(OpARMREV) @@ -13011,12 +13031,12 @@ func rewriteValueARM_OpConst8(v *Value) bool { } } func rewriteValueARM_OpConstBool(v *Value) bool { - // match: (ConstBool [b]) - // result: (MOVWconst [b2i32(b)]) + // match: (ConstBool [t]) + // result: (MOVWconst [b2i32(t)]) for { - b := auxIntToBool(v.AuxInt) + t := auxIntToBool(v.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int32ToAuxInt(b2i32(b)) + v.AuxInt = int32ToAuxInt(b2i32(t)) return true } } @@ -13034,12 +13054,12 @@ func rewriteValueARM_OpCtz16(v *Value) bool 
{ b := v.Block typ := &b.Func.Config.Types // match: (Ctz16 x) - // cond: objabi.GOARM<=6 + // cond: buildcfg.GOARM<=6 // result: (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x10000] x) (RSBconst [0] (ORconst [0x10000] x))) [1]))) for { t := v.Type x := v_0 - if !(objabi.GOARM <= 6) { + if !(buildcfg.GOARM <= 6) { break } v.reset(OpARMRSBconst) @@ -13061,12 +13081,12 @@ func rewriteValueARM_OpCtz16(v *Value) bool { return true } // match: (Ctz16 x) - // cond: objabi.GOARM==7 + // cond: buildcfg.GOARM==7 // result: (CLZ (RBIT (ORconst [0x10000] x))) for { t := v.Type x := v_0 - if !(objabi.GOARM == 7) { + if !(buildcfg.GOARM == 7) { break } v.reset(OpARMCLZ) @@ -13085,12 +13105,12 @@ func rewriteValueARM_OpCtz32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Ctz32 x) - // cond: objabi.GOARM<=6 + // cond: buildcfg.GOARM<=6 // result: (RSBconst [32] (CLZ (SUBconst (AND x (RSBconst [0] x)) [1]))) for { t := v.Type x := v_0 - if !(objabi.GOARM <= 6) { + if !(buildcfg.GOARM <= 6) { break } v.reset(OpARMRSBconst) @@ -13109,12 +13129,12 @@ func rewriteValueARM_OpCtz32(v *Value) bool { return true } // match: (Ctz32 x) - // cond: objabi.GOARM==7 + // cond: buildcfg.GOARM==7 // result: (CLZ (RBIT x)) for { t := v.Type x := v_0 - if !(objabi.GOARM == 7) { + if !(buildcfg.GOARM == 7) { break } v.reset(OpARMCLZ) @@ -13131,12 +13151,12 @@ func rewriteValueARM_OpCtz8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Ctz8 x) - // cond: objabi.GOARM<=6 + // cond: buildcfg.GOARM<=6 // result: (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x100] x) (RSBconst [0] (ORconst [0x100] x))) [1]))) for { t := v.Type x := v_0 - if !(objabi.GOARM <= 6) { + if !(buildcfg.GOARM <= 6) { break } v.reset(OpARMRSBconst) @@ -13158,12 +13178,12 @@ func rewriteValueARM_OpCtz8(v *Value) bool { return true } // match: (Ctz8 x) - // cond: objabi.GOARM==7 + // cond: buildcfg.GOARM==7 // result: (CLZ (RBIT (ORconst [0x100] x))) for { t := v.Type x := v_0 - if !(objabi.GOARM == 7) { + if !(buildcfg.GOARM == 7) { break } v.reset(OpARMCLZ) diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index ff1156d9011..0ba3951df57 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -69,6 +69,14 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64CSEL(v) case OpARM64CSEL0: return rewriteValueARM64_OpARM64CSEL0(v) + case OpARM64CSETM: + return rewriteValueARM64_OpARM64CSETM(v) + case OpARM64CSINC: + return rewriteValueARM64_OpARM64CSINC(v) + case OpARM64CSINV: + return rewriteValueARM64_OpARM64CSINV(v) + case OpARM64CSNEG: + return rewriteValueARM64_OpARM64CSNEG(v) case OpARM64DIV: return rewriteValueARM64_OpARM64DIV(v) case OpARM64DIVW: @@ -99,18 +107,26 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64FMOVDload(v) case OpARM64FMOVDloadidx: return rewriteValueARM64_OpARM64FMOVDloadidx(v) + case OpARM64FMOVDloadidx8: + return rewriteValueARM64_OpARM64FMOVDloadidx8(v) case OpARM64FMOVDstore: return rewriteValueARM64_OpARM64FMOVDstore(v) case OpARM64FMOVDstoreidx: return rewriteValueARM64_OpARM64FMOVDstoreidx(v) + case OpARM64FMOVDstoreidx8: + return rewriteValueARM64_OpARM64FMOVDstoreidx8(v) case OpARM64FMOVSload: return rewriteValueARM64_OpARM64FMOVSload(v) case OpARM64FMOVSloadidx: return rewriteValueARM64_OpARM64FMOVSloadidx(v) + case OpARM64FMOVSloadidx4: + return rewriteValueARM64_OpARM64FMOVSloadidx4(v) case OpARM64FMOVSstore: return 
rewriteValueARM64_OpARM64FMOVSstore(v) case OpARM64FMOVSstoreidx: return rewriteValueARM64_OpARM64FMOVSstoreidx(v) + case OpARM64FMOVSstoreidx4: + return rewriteValueARM64_OpARM64FMOVSstoreidx4(v) case OpARM64FMULD: return rewriteValueARM64_OpARM64FMULD(v) case OpARM64FMULS: @@ -189,6 +205,8 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64MOVDloadidx(v) case OpARM64MOVDloadidx8: return rewriteValueARM64_OpARM64MOVDloadidx8(v) + case OpARM64MOVDnop: + return rewriteValueARM64_OpARM64MOVDnop(v) case OpARM64MOVDreg: return rewriteValueARM64_OpARM64MOVDreg(v) case OpARM64MOVDstore: @@ -989,6 +1007,9 @@ func rewriteValueARM64(v *Value) bool { case OpSqrt: v.Op = OpARM64FSQRTD return true + case OpSqrt32: + v.Op = OpARM64FSQRTS + return true case OpStaticCall: v.Op = OpARM64CALLstatic return true @@ -1751,6 +1772,81 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool { v.AddArg(x) return true } + // match: (ADDshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff + // result: (REV16W x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) + // result: (REV16 x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) { + break + } + v.reset(OpARM64REV16) + v.AddArg(x) + return true + } + // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) + // result: (REV16 (ANDconst [0xffffffff] x)) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16) + v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type) + v0.AuxInt = int64ToAuxInt(0xffffffff) + v0.AddArg(x) + v.AddArg(v0) + return true + } // match: (ADDshiftLL [c] (SRLconst x [64-c]) x2) // result: (EXTRconst [64-c] x2 x) for { @@ -3202,6 +3298,32 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + // match: (CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) + // result: (CSETM [cc] flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != -1 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + 
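// The three ADDshiftLL [8] rules added above (ORshiftLL and XORshiftLL get
// the same rules later in this file) all recognize one idiom: combining
// ((x & 0xff00ff00ff00ff00) >> 8) with ((x & 0x00ff00ff00ff00ff) << 8).
// Because the two masked halves have disjoint bits, ADD, OR and XOR are
// interchangeable here, and the whole expression swaps the two bytes of
// every 16-bit lane -- exactly what ARM64's REV16 computes. Self-checking
// sketch (illustrative, not compiler source):
package main

import "fmt"

func rev16(x uint64) uint64 {
	return (x&0xff00ff00ff00ff00)>>8 | (x&0x00ff00ff00ff00ff)<<8
}

func main() {
	fmt.Printf("%#016x\n", rev16(0x1122334455667788)) // 0x2211443366558877
}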
break + } + flag := v_2 + v.reset(OpARM64CSETM) + v.AuxInt = opToAuxInt(cc) + v.AddArg(flag) + return true + } + // match: (CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) + // result: (CSETM [arm64Negate(cc)] flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { + break + } + flag := v_2 + v.reset(OpARM64CSETM) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg(flag) + return true + } // match: (CSEL [cc] x (MOVDconst [0]) flag) // result: (CSEL0 [cc] x flag) for { @@ -3230,6 +3352,96 @@ func rewriteValueARM64_OpARM64CSEL(v *Value) bool { v.AddArg2(y, flag) return true } + // match: (CSEL [cc] x (ADDconst [1] a) flag) + // result: (CSINC [cc] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + a := v_1.Args[0] + flag := v_2 + v.reset(OpARM64CSINC) + v.AuxInt = opToAuxInt(cc) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] (ADDconst [1] a) x flag) + // result: (CSINC [arm64Negate(cc)] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64ADDconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + a := v_0.Args[0] + x := v_1 + flag := v_2 + v.reset(OpARM64CSINC) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] x (MVN a) flag) + // result: (CSINV [cc] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MVN { + break + } + a := v_1.Args[0] + flag := v_2 + v.reset(OpARM64CSINV) + v.AuxInt = opToAuxInt(cc) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] (MVN a) x flag) + // result: (CSINV [arm64Negate(cc)] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64MVN { + break + } + a := v_0.Args[0] + x := v_1 + flag := v_2 + v.reset(OpARM64CSINV) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] x (NEG a) flag) + // result: (CSNEG [cc] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64NEG { + break + } + a := v_1.Args[0] + flag := v_2 + v.reset(OpARM64CSNEG) + v.AuxInt = opToAuxInt(cc) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] (NEG a) x flag) + // result: (CSNEG [arm64Negate(cc)] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64NEG { + break + } + a := v_0.Args[0] + x := v_1 + flag := v_2 + v.reset(OpARM64CSNEG) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg3(x, a, flag) + return true + } // match: (CSEL [cc] x y (InvertFlags cmp)) // result: (CSEL [arm64Invert(cc)] x y cmp) for { @@ -3392,6 +3604,194 @@ func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { } return false } +func rewriteValueARM64_OpARM64CSETM(v *Value) bool { + v_0 := v.Args[0] + // match: (CSETM [cc] (InvertFlags cmp)) + // result: (CSETM [arm64Invert(cc)] cmp) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64InvertFlags { + break + } + cmp := v_0.Args[0] + v.reset(OpARM64CSETM) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg(cmp) + return true + } + // match: (CSETM [cc] flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: (MOVDconst [-1]) + for { + cc := auxIntToOp(v.AuxInt) + flag := v_0 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (CSETM [cc] flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (MOVDconst [0]) + for { + cc := 
auxIntToOp(v.AuxInt) + flag := v_0 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSINC(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CSINC [cc] x y (InvertFlags cmp)) + // result: (CSINC [arm64Invert(cc)] x y cmp) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64InvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpARM64CSINC) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg3(x, y, cmp) + return true + } + // match: (CSINC [cc] x _ flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + flag := v_2 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.copyOf(x) + return true + } + // match: (CSINC [cc] _ y flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (ADDconst [1] y) + for { + cc := auxIntToOp(v.AuxInt) + y := v_1 + flag := v_2 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(1) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSINV(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CSINV [cc] x y (InvertFlags cmp)) + // result: (CSINV [arm64Invert(cc)] x y cmp) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64InvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpARM64CSINV) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg3(x, y, cmp) + return true + } + // match: (CSINV [cc] x _ flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + flag := v_2 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.copyOf(x) + return true + } + // match: (CSINV [cc] _ y flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (Not y) + for { + cc := auxIntToOp(v.AuxInt) + y := v_1 + flag := v_2 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpNot) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSNEG(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CSNEG [cc] x y (InvertFlags cmp)) + // result: (CSNEG [arm64Invert(cc)] x y cmp) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64InvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpARM64CSNEG) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg3(x, y, cmp) + return true + } + // match: (CSNEG [cc] x _ flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + flag := v_2 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.copyOf(x) + return true + } + // match: (CSNEG [cc] _ y flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (NEG y) + for { + cc := auxIntToOp(v.AuxInt) + y := v_1 + flag := v_2 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpARM64NEG) + v.AddArg(y) + return true + } + return false +} func rewriteValueARM64_OpARM64DIV(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -3898,6 +4298,25 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (FMOVDloadidx8 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[1] + 
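// The new CSETM/CSINC/CSINV/CSNEG ops model ARM64's conditional-select
// family, which the CSEL rules above now target: CSETM yields -1 or 0, and
// the others pick x when the condition holds, else y+1, ^y, or -y. A
// minimal sketch of the semantics, assuming cond stands for the already
// evaluated flags (illustrative only):
package main

import "fmt"

func csetm(cond bool) int64 {
	if cond {
		return -1
	}
	return 0
}

func csinc(cond bool, x, y int64) int64 {
	if cond {
		return x
	}
	return y + 1
}

func csinv(cond bool, x, y int64) int64 {
	if cond {
		return x
	}
	return ^y
}

func csneg(cond bool, x, y int64) int64 {
	if cond {
		return x
	}
	return -y
}

func main() {
	fmt.Println(csetm(true), csinc(false, 0, 9), csinv(false, 0, 9), csneg(false, 0, 9)) // -1 10 -10 -9
}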
ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) @@ -3962,6 +4381,56 @@ func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool { v.AddArg2(ptr, mem) return true } + // match: (FMOVDloadidx ptr (SLLconst [3] idx) mem) + // result: (FMOVDloadidx8 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64FMOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (FMOVDloadidx (SLLconst [3] idx) ptr mem) + // result: (FMOVDloadidx8 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64FMOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDloadidx8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDloadidx8 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<3) + // result: (FMOVDload ptr [int32(c)<<3] mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 3)) { + break + } + v.reset(OpARM64FMOVDload) + v.AuxInt = int32ToAuxInt(int32(c) << 3) + v.AddArg2(ptr, mem) + return true + } return false } func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { @@ -4029,6 +4498,26 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { v.AddArg4(ptr, idx, val, mem) return true } + // match: (FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (FMOVDstoreidx8 ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) @@ -4097,6 +4586,60 @@ func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool { v.AddArg3(idx, val, mem) return true } + // match: (FMOVDstoreidx ptr (SLLconst [3] idx) val mem) + // result: (FMOVDstoreidx8 ptr idx val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { + break + } + idx := v_1.Args[0] + val := v_2 + mem := v_3 + v.reset(OpARM64FMOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (FMOVDstoreidx (SLLconst [3] idx) ptr val mem) + // result: (FMOVDstoreidx8 ptr idx val mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[0] + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARM64FMOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDstoreidx8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c<<3) + // result: (FMOVDstore [int32(c)<<3] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c << 3)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AuxInt = int32ToAuxInt(int32(c) << 3) + v.AddArg3(ptr, val, mem) + return true + } return false } func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { @@ -4161,6 +4704,25 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { v.AddArg3(ptr, idx, mem) return true } + // match: (FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (FMOVSloadidx4 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVSloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) @@ -4225,6 +4787,56 @@ func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool { v.AddArg2(ptr, mem) return true } + // match: (FMOVSloadidx ptr (SLLconst [2] idx) mem) + // result: (FMOVSloadidx4 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64FMOVSloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (FMOVSloadidx (SLLconst [2] idx) ptr mem) + // result: (FMOVSloadidx4 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64FMOVSloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVSloadidx4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSloadidx4 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<2) + // result: (FMOVSload ptr [int32(c)<<2] mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 2)) { + break + } + v.reset(OpARM64FMOVSload) + v.AuxInt = int32ToAuxInt(int32(c) << 2) + v.AddArg2(ptr, mem) + return true + } return false } func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { @@ -4292,6 +4904,26 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { v.AddArg4(ptr, idx, val, mem) return true } + // match: (FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (FMOVSstoreidx4 ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVSstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) @@ -4360,6 +4992,60 @@ 
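// The FMOVDloadidx8/FMOVSloadidx4 (and matching storeidx) forms introduced
// above fold the shift from (ADDshiftLL [3] ptr idx) or (SLLconst [3] idx)
// into the addressing mode: the address is ptr + idx<<3 (idx<<2 for the
// 4-byte forms), and a constant index folds further into a plain offset
// when it fits in 32 bits. Go-level picture of the scaled access
// (illustrative):
package main

import "fmt"

func loadidx8(base []float64, idx int) float64 {
	return base[idx] // address: &base[0] + idx<<3, as in FMOVDloadidx8
}

func main() {
	fmt.Println(loadidx8([]float64{1.5, 2.5, 3.5}, 2)) // 3.5
}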
func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool { v.AddArg3(idx, val, mem) return true } + // match: (FMOVSstoreidx ptr (SLLconst [2] idx) val mem) + // result: (FMOVSstoreidx4 ptr idx val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { + break + } + idx := v_1.Args[0] + val := v_2 + mem := v_3 + v.reset(OpARM64FMOVSstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (FMOVSstoreidx (SLLconst [2] idx) ptr val mem) + // result: (FMOVSstoreidx4 ptr idx val mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[0] + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARM64FMOVSstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVSstoreidx4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c<<2) + // result: (FMOVSstore [int32(c)<<2] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c << 2)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AuxInt = int32ToAuxInt(int32(c) << 2) + v.AddArg3(ptr, val, mem) + return true + } return false } func rewriteValueARM64_OpARM64FMULD(v *Value) bool { @@ -6450,6 +7136,21 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { v.AddArg(x) return true } + // match: (MOVBUreg (SLLconst [lc] x)) + // cond: lc >= 8 + // result: (MOVDconst [0]) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + if !(lc >= 8) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } // match: (MOVBUreg (SLLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<8-1, sc) // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) @@ -9011,6 +9712,21 @@ func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool { } return false } +func rewriteValueARM64_OpARM64MOVDnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVDnop (MOVDconst [c])) + // result: (MOVDconst [c]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVDreg x) @@ -9932,6 +10648,21 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { v.AuxInt = int64ToAuxInt(int64(uint16(c))) return true } + // match: (MOVHUreg (SLLconst [lc] x)) + // cond: lc >= 16 + // result: (MOVDconst [0]) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + if !(lc >= 16) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } // match: (MOVHUreg (SLLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<16-1, sc) // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) @@ -12029,6 +12760,21 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { v.AuxInt = int64ToAuxInt(int64(uint32(c))) return true } + // match: (MOVWUreg (SLLconst [lc] x)) + // cond: lc >= 32 + // result: (MOVDconst [0]) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + if !(lc >= 32) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } // match: (MOVWUreg (SLLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<32-1, sc) // result: (UBFIZ 
[armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) @@ -17332,6 +18078,81 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.AddArg(x) return true } + // match: (ORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff + // result: (REV16W x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) + // result: (REV16 x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) { + break + } + v.reset(OpARM64REV16) + v.AddArg(x) + return true + } + // match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) + // result: (REV16 (ANDconst [0xffffffff] x)) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16) + v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type) + v0.AuxInt = int64ToAuxInt(0xffffffff) + v0.AddArg(x) + v.AddArg(v0) + return true + } // match: ( ORshiftLL [c] (SRLconst x [64-c]) x2) // result: (EXTRconst [64-c] x2 x) for { @@ -19457,6 +20278,51 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool { v.AddArg(x) return true } + // match: (SRLconst [rc] (MOVWUreg x)) + // cond: rc >= 32 + // result: (MOVDconst [0]) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVWUreg { + break + } + if !(rc >= 32) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLconst [rc] (MOVHUreg x)) + // cond: rc >= 16 + // result: (MOVDconst [0]) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVHUreg { + break + } + if !(rc >= 16) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLconst [rc] (MOVBUreg x)) + // cond: rc >= 8 + // result: (MOVDconst [0]) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVBUreg { + break + } + if !(rc >= 8) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } // match: (SRLconst [rc] (SLLconst [lc] x)) // cond: lc > rc // result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) @@ -21205,6 +22071,81 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool { v.AddArg(x) return true } + // 
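// The MOV{B,H,W}Ureg-of-SLLconst and SRLconst-of-MOV{B,H,W}Ureg rules above
// share one observation: zero-extending from n bits and then shifting the
// value entirely past those n bits leaves nothing set, so the result folds
// to the constant 0. Illustrative check:
package main

import "fmt"

func main() {
	var x uint64 = 0xdeadbeef
	fmt.Println(uint64(uint8(x)) >> 8)   // SRLconst [8] (MOVBUreg x) -> 0
	fmt.Println(uint64(uint32(x << 32))) // MOVWUreg (SLLconst [32] x) -> 0
}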
match: (XORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff + // result: (REV16W x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) + // result: (REV16 x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) { + break + } + v.reset(OpARM64REV16) + v.AddArg(x) + return true + } + // match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) + // result: (REV16 (ANDconst [0xffffffff] x)) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16) + v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type) + v0.AuxInt = int64ToAuxInt(0xffffffff) + v0.AddArg(x) + v.AddArg(v0) + return true + } // match: (XORshiftLL [c] (SRLconst x [64-c]) x2) // result: (EXTRconst [64-c] x2 x) for { @@ -21735,12 +22676,12 @@ func rewriteValueARM64_OpConst8(v *Value) bool { } } func rewriteValueARM64_OpConstBool(v *Value) bool { - // match: (ConstBool [b]) - // result: (MOVDconst [b2i(b)]) + // match: (ConstBool [t]) + // result: (MOVDconst [b2i(t)]) for { - b := auxIntToBool(v.AuxInt) + t := auxIntToBool(v.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64ToAuxInt(b2i(b)) + v.AuxInt = int64ToAuxInt(b2i(t)) return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index 3fc55279550..429369d631e 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -297,6 +297,8 @@ func rewriteValueMIPS(v *Value) bool { return rewriteValueMIPS_OpMIPSMOVHstorezero(v) case OpMIPSMOVWload: return rewriteValueMIPS_OpMIPSMOVWload(v) + case OpMIPSMOVWnop: + return rewriteValueMIPS_OpMIPSMOVWnop(v) case OpMIPSMOVWreg: return rewriteValueMIPS_OpMIPSMOVWreg(v) case OpMIPSMOVWstore: @@ -514,6 +516,9 @@ func rewriteValueMIPS(v *Value) bool { case OpSqrt: v.Op = OpMIPSSQRTD return true + case OpSqrt32: + v.Op = OpMIPSSQRTF + return true case OpStaticCall: v.Op = OpMIPSCALLstatic return true @@ -867,12 +872,12 @@ func rewriteValueMIPS_OpConst8(v *Value) bool { } } func rewriteValueMIPS_OpConstBool(v *Value) bool { - // 
match: (ConstBool [b]) - // result: (MOVWconst [b2i32(b)]) + // match: (ConstBool [t]) + // result: (MOVWconst [b2i32(t)]) for { - b := auxIntToBool(v.AuxInt) + t := auxIntToBool(v.AuxInt) v.reset(OpMIPSMOVWconst) - v.AuxInt = int32ToAuxInt(b2i32(b)) + v.AuxInt = int32ToAuxInt(b2i32(t)) return true } } @@ -3647,6 +3652,21 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { } return false } +func rewriteValueMIPS_OpMIPSMOVWnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWnop (MOVWconst [c])) + // result: (MOVWconst [c]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c) + return true + } + return false +} func rewriteValueMIPS_OpMIPSMOVWreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVWreg x) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index d78f6089aff..772d7b66efe 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -339,6 +339,8 @@ func rewriteValueMIPS64(v *Value) bool { return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v) case OpMIPS64MOVVload: return rewriteValueMIPS64_OpMIPS64MOVVload(v) + case OpMIPS64MOVVnop: + return rewriteValueMIPS64_OpMIPS64MOVVnop(v) case OpMIPS64MOVVreg: return rewriteValueMIPS64_OpMIPS64MOVVreg(v) case OpMIPS64MOVVstore: @@ -594,6 +596,9 @@ func rewriteValueMIPS64(v *Value) bool { case OpSqrt: v.Op = OpMIPS64SQRTD return true + case OpSqrt32: + v.Op = OpMIPS64SQRTF + return true case OpStaticCall: v.Op = OpMIPS64CALLstatic return true @@ -830,12 +835,12 @@ func rewriteValueMIPS64_OpConst8(v *Value) bool { } } func rewriteValueMIPS64_OpConstBool(v *Value) bool { - // match: (ConstBool [b]) - // result: (MOVVconst [int64(b2i(b))]) + // match: (ConstBool [t]) + // result: (MOVVconst [int64(b2i(t))]) for { - b := auxIntToBool(v.AuxInt) + t := auxIntToBool(v.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64ToAuxInt(int64(b2i(b))) + v.AuxInt = int64ToAuxInt(int64(b2i(t))) return true } } @@ -2663,6 +2668,19 @@ func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool { v.AddArg2(ptr, mem) return true } + // match: (MOVBload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVVconst [int64(read8(sym, int64(off)))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off)))) + return true + } return false } func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool { @@ -3227,6 +3245,8 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool { func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) @@ -3270,6 +3290,19 @@ func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { v.AddArg2(ptr, mem) return true } + // match: (MOVHload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } return false } func rewriteValueMIPS64_OpMIPS64MOVHreg(v 
*Value) bool { @@ -3539,6 +3572,8 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool { func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) @@ -3582,6 +3617,34 @@ func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { v.AddArg2(ptr, mem) return true } + // match: (MOVVload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVVnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVVnop (MOVVconst [c])) + // result: (MOVVconst [c]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c) + return true + } return false } func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool { @@ -3858,6 +3921,8 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) @@ -3901,6 +3966,19 @@ func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { v.AddArg2(ptr, mem) return true } + // match: (MOVWload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } return false } func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 98f748e5fa4..96dee0bd21b 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -3,8 +3,8 @@ package ssa +import "internal/buildcfg" import "math" -import "cmd/internal/objabi" import "cmd/compile/internal/types" func rewriteValuePPC64(v *Value) bool { @@ -743,6 +743,9 @@ func rewriteValuePPC64(v *Value) bool { case OpSqrt: v.Op = OpPPC64FSQRT return true + case OpSqrt32: + v.Op = OpPPC64FSQRTS + return true case OpStaticCall: v.Op = OpPPC64CALLstatic return true @@ -1231,12 +1234,12 @@ func rewriteValuePPC64_OpConst8(v *Value) bool { } } func rewriteValuePPC64_OpConstBool(v *Value) bool { - // match: (ConstBool [b]) - // result: (MOVDconst [b2i(b)]) + // match: (ConstBool [t]) + // result: (MOVDconst [b2i(t)]) for { - b := auxIntToBool(v.AuxInt) + t := auxIntToBool(v.AuxInt) v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(b2i(b)) + v.AuxInt = int64ToAuxInt(b2i(t)) return true } } @@ -1287,11 +1290,11 @@ func rewriteValuePPC64_OpCtz32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Ctz32 x) - // cond: objabi.GOPPC64<=8 + // cond: buildcfg.GOPPC64<=8 // result: (POPCNTW (MOVWZreg (ANDN 
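// The MOV{B,H,W,V}load-from-SB rules added above fold loads from read-only
// symbols into constants at compile time, reading the symbol's bytes with
// the target's byte order (hence the new config.ctxt.Arch.ByteOrder
// plumbing). The read8/read16/read32/read64 helpers behave like this
// encoding/binary sketch (illustrative, not the helpers' source):
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	rodata := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	fmt.Printf("%#x\n", binary.LittleEndian.Uint16(rodata)) // 0x201
	fmt.Printf("%#x\n", binary.BigEndian.Uint64(rodata))    // 0x102030405060708
}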
(ADDconst [-1] x) x))) for { x := v_0 - if !(objabi.GOPPC64 <= 8) { + if !(buildcfg.GOPPC64 <= 8) { break } v.reset(OpPPC64POPCNTW) @@ -1321,11 +1324,11 @@ func rewriteValuePPC64_OpCtz64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Ctz64 x) - // cond: objabi.GOPPC64<=8 + // cond: buildcfg.GOPPC64<=8 // result: (POPCNTD (ANDN (ADDconst [-1] x) x)) for { x := v_0 - if !(objabi.GOPPC64 <= 8) { + if !(buildcfg.GOPPC64 <= 8) { break } v.reset(OpPPC64POPCNTD) @@ -3283,12 +3286,12 @@ func rewriteValuePPC64_OpMod32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Mod32 x y) - // cond: objabi.GOPPC64 >= 9 + // cond: buildcfg.GOPPC64 >= 9 // result: (MODSW x y) for { x := v_0 y := v_1 - if !(objabi.GOPPC64 >= 9) { + if !(buildcfg.GOPPC64 >= 9) { break } v.reset(OpPPC64MODSW) @@ -3296,12 +3299,12 @@ func rewriteValuePPC64_OpMod32(v *Value) bool { return true } // match: (Mod32 x y) - // cond: objabi.GOPPC64 <= 8 + // cond: buildcfg.GOPPC64 <= 8 // result: (SUB x (MULLW y (DIVW x y))) for { x := v_0 y := v_1 - if !(objabi.GOPPC64 <= 8) { + if !(buildcfg.GOPPC64 <= 8) { break } v.reset(OpPPC64SUB) @@ -3320,12 +3323,12 @@ func rewriteValuePPC64_OpMod32u(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Mod32u x y) - // cond: objabi.GOPPC64 >= 9 + // cond: buildcfg.GOPPC64 >= 9 // result: (MODUW x y) for { x := v_0 y := v_1 - if !(objabi.GOPPC64 >= 9) { + if !(buildcfg.GOPPC64 >= 9) { break } v.reset(OpPPC64MODUW) @@ -3333,12 +3336,12 @@ func rewriteValuePPC64_OpMod32u(v *Value) bool { return true } // match: (Mod32u x y) - // cond: objabi.GOPPC64 <= 8 + // cond: buildcfg.GOPPC64 <= 8 // result: (SUB x (MULLW y (DIVWU x y))) for { x := v_0 y := v_1 - if !(objabi.GOPPC64 <= 8) { + if !(buildcfg.GOPPC64 <= 8) { break } v.reset(OpPPC64SUB) @@ -3357,12 +3360,12 @@ func rewriteValuePPC64_OpMod64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Mod64 x y) - // cond: objabi.GOPPC64 >=9 + // cond: buildcfg.GOPPC64 >=9 // result: (MODSD x y) for { x := v_0 y := v_1 - if !(objabi.GOPPC64 >= 9) { + if !(buildcfg.GOPPC64 >= 9) { break } v.reset(OpPPC64MODSD) @@ -3370,12 +3373,12 @@ func rewriteValuePPC64_OpMod64(v *Value) bool { return true } // match: (Mod64 x y) - // cond: objabi.GOPPC64 <=8 + // cond: buildcfg.GOPPC64 <=8 // result: (SUB x (MULLD y (DIVD x y))) for { x := v_0 y := v_1 - if !(objabi.GOPPC64 <= 8) { + if !(buildcfg.GOPPC64 <= 8) { break } v.reset(OpPPC64SUB) @@ -3394,12 +3397,12 @@ func rewriteValuePPC64_OpMod64u(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Mod64u x y) - // cond: objabi.GOPPC64 >= 9 + // cond: buildcfg.GOPPC64 >= 9 // result: (MODUD x y) for { x := v_0 y := v_1 - if !(objabi.GOPPC64 >= 9) { + if !(buildcfg.GOPPC64 >= 9) { break } v.reset(OpPPC64MODUD) @@ -3407,12 +3410,12 @@ func rewriteValuePPC64_OpMod64u(v *Value) bool { return true } // match: (Mod64u x y) - // cond: objabi.GOPPC64 <= 8 + // cond: buildcfg.GOPPC64 <= 8 // result: (SUB x (MULLD y (DIVDU x y))) for { x := v_0 y := v_1 - if !(objabi.GOPPC64 <= 8) { + if !(buildcfg.GOPPC64 <= 8) { break } v.reset(OpPPC64SUB) @@ -3525,46 +3528,20 @@ func rewriteValuePPC64_OpMove(v *Value) bool { return true } // match: (Move [8] {t} dst src mem) - // cond: t.Alignment()%4 == 0 // result: (MOVDstore dst (MOVDload src mem) mem) for { if auxIntToInt64(v.AuxInt) != 8 { break } - t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.Alignment()%4 == 0) { - break - } v.reset(OpPPC64MOVDstore) v0 := b.NewValue0(v.Pos, 
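The PPC64 changes so far are mechanical: the GOPPC64 feature gate moves from cmd/internal/objabi to the new internal/buildcfg package, with the rules themselves untouched. For readers unfamiliar with the gate, a sketch of how a GOPPC64-conditioned rule pair like Mod32 behaves, with `goppc64` standing in for buildcfg.GOPPC64:

package main

import "fmt"

// goppc64 stands in for buildcfg.GOPPC64, the minimum POWER ISA level the
// compiler may assume (set via the GOPPC64 environment variable).
var goppc64 = 9

// mod32 mirrors the two Mod32 rules: POWER9 and later have a hardware
// modulo instruction (MODSW); older targets expand x % y as x - y*(x/y).
func mod32(x, y int32) int32 {
	if goppc64 >= 9 {
		return x % y // (MODSW x y)
	}
	return x - y*(x/y) // (SUB x (MULLW y (DIVW x y)))
}

func main() { fmt.Println(mod32(7, 3)) } // 1 either way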
OpPPC64MOVDload, typ.Int64) v0.AddArg2(src, mem) v.AddArg3(dst, v0, mem) return true } - // match: (Move [8] dst src mem) - // result: (MOVWstore [4] dst (MOVWZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpPPC64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } // match: (Move [3] dst src mem) // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) for { @@ -3656,14 +3633,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { return true } // match: (Move [s] dst src mem) - // cond: s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s) + // cond: s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) // result: (LoweredMove [s] dst src mem) for { s := auxIntToInt64(v.AuxInt) dst := v_0 src := v_1 mem := v_2 - if !(s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s)) { + if !(s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s)) { break } v.reset(OpPPC64LoweredMove) @@ -3672,14 +3649,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { return true } // match: (Move [s] dst src mem) - // cond: s > 8 && s <= 64 && objabi.GOPPC64 >= 9 + // cond: s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 // result: (LoweredQuadMoveShort [s] dst src mem) for { s := auxIntToInt64(v.AuxInt) dst := v_0 src := v_1 mem := v_2 - if !(s > 8 && s <= 64 && objabi.GOPPC64 >= 9) { + if !(s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9) { break } v.reset(OpPPC64LoweredQuadMoveShort) @@ -3688,14 +3665,14 @@ func rewriteValuePPC64_OpMove(v *Value) bool { return true } // match: (Move [s] dst src mem) - // cond: s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s) + // cond: s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) // result: (LoweredQuadMove [s] dst src mem) for { s := auxIntToInt64(v.AuxInt) dst := v_0 src := v_1 mem := v_2 - if !(s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s)) { + if !(s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s)) { break } v.reset(OpPPC64LoweredQuadMove) @@ -3905,7 +3882,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (ADD l:(MULLD x y) z) - // cond: objabi.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) + // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) // result: (MADDLD x y z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -3916,7 +3893,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { y := l.Args[1] x := l.Args[0] z := v_1 - if !(objabi.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) { + if !(buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) { continue } v.reset(OpPPC64MADDLD) @@ -7878,7 +7855,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { return true } // match: (MOVBstore [i7] {s} p (SRDconst w [56]) x0:(MOVBstore [i6] {s} p (SRDconst w [48]) x1:(MOVBstore [i5] {s} p (SRDconst w [40]) x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVWstore [i0] {s} p w mem))))) - // cond: !config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3) + // cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && 
x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3) // result: (MOVDstore [i0] {s} p w mem) for { i7 := auxIntToInt32(v.AuxInt) @@ -7945,7 +7922,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { break } mem := x3.Args[2] - if p != x3.Args[0] || w != x3.Args[1] || !(!config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3)) { + if p != x3.Args[0] || w != x3.Args[1] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3)) { break } v.reset(OpPPC64MOVDstore) @@ -8389,7 +8366,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { return true } // match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 + // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -8402,7 +8379,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { sym2 := auxToSym(p.Aux) ptr := p.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) { + if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVDload) @@ -8412,7 +8389,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { return true } // match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem) - // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 + // cond: is16Bit(int64(off1)+off2) // result: (MOVDload [off1+int32(off2)] {sym} x mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -8423,7 +8400,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] mem := v_1 - if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) { + if !(is16Bit(int64(off1) + off2)) { break } v.reset(OpPPC64MOVDload) @@ -8520,7 +8497,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { return true } // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) - // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 + // cond: is16Bit(int64(off1)+off2) // result: (MOVDstore [off1+int32(off2)] {sym} x val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -8532,7 +8509,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { x := v_0.Args[0] val := v_1 mem := v_2 - if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) { + if !(is16Bit(int64(off1) + off2)) { break } v.reset(OpPPC64MOVDstore) @@ -8542,7 +8519,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { return true } // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 + // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -8556,7 +8533,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { ptr := p.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && 
(off1+off2)%4 == 0) { + if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVDstore) @@ -8655,7 +8632,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) - // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 + // cond: is16Bit(int64(off1)+off2) // result: (MOVDstorezero [off1+int32(off2)] {sym} x mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -8666,7 +8643,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] mem := v_1 - if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) { + if !(is16Bit(int64(off1) + off2)) { break } v.reset(OpPPC64MOVDstorezero) @@ -8676,7 +8653,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { return true } // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) - // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 + // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -8689,7 +8666,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { sym2 := auxToSym(p.Aux) x := p.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) { + if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVDstorezero) @@ -10595,7 +10572,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 + // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -10608,7 +10585,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { sym2 := auxToSym(p.Aux) ptr := p.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) { + if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVWload) @@ -10618,7 +10595,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { return true } // match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem) - // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 + // cond: is16Bit(int64(off1)+off2) // result: (MOVWload [off1+int32(off2)] {sym} x mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -10629,7 +10606,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] mem := v_1 - if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) { + if !(is16Bit(int64(off1) + off2)) { break } v.reset(OpPPC64MOVWload) @@ -12501,7 +12478,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { break } // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem))))) - // cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && 
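These PPC64 hunks drop the (off1+off2)%4 == 0 guards from the MOVDload/MOVDstore/MOVDstorezero offset-folding rules, leaving is16Bit as the only condition; presumably the DS-form requirement that the displacement be a multiple of four is now enforced after rewriting rather than proved here. A sketch of the guard that remains:

package main

import "fmt"

// is16Bit reports whether a folded displacement still fits the signed
// 16-bit offset field of a PPC64 D/DS-form load or store; after this
// change it is the only check on these folds.
func is16Bit(n int64) bool {
	return n == int64(int16(n))
}

func main() {
	fmt.Println(is16Bit(32760 + 16)) // false: 32776 overflows int16
	fmt.Println(is16Bit(-32768))     // true: the most negative displacement
}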
x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5) + // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5) // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) for { t := v.Type @@ -12599,7 +12576,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } _ = x0.Args[1] - if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)) { + if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)) { continue } b = mergePoint(b, x0, x4, x5, x6, x7) @@ -13264,7 +13241,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { break } // match: (SLDconst [c] z:(MOVWreg x)) - // cond: c < 32 && objabi.GOPPC64 >= 9 + // cond: c < 32 && buildcfg.GOPPC64 >= 9 // result: (EXTSWSLconst [c] x) for { c := auxIntToInt64(v.AuxInt) @@ -13273,7 +13250,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { break } x := z.Args[0] - if !(c < 32 && objabi.GOPPC64 >= 9) { + if !(c < 32 && buildcfg.GOPPC64 >= 9) { break } v.reset(OpPPC64EXTSWSLconst) @@ -13387,7 +13364,7 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { break } // match: (SLWconst [c] z:(MOVWreg x)) - // cond: c < 32 && objabi.GOPPC64 >= 9 + // cond: c < 32 && buildcfg.GOPPC64 >= 9 // result: (EXTSWSLconst [c] x) for { c := auxIntToInt64(v.AuxInt) @@ -13396,7 +13373,7 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { break } x := z.Args[0] - if !(c < 32 && objabi.GOPPC64 >= 9) { + if !(c < 32 && buildcfg.GOPPC64 >= 9) { break } v.reset(OpPPC64EXTSWSLconst) @@ -16844,51 +16821,25 @@ func rewriteValuePPC64_OpZero(v *Value) bool { return true } // match: (Zero [8] {t} destptr mem) - // cond: t.Alignment()%4 == 0 // result: (MOVDstorezero destptr mem) for { if auxIntToInt64(v.AuxInt) != 8 { break } - t := auxToType(v.Aux) destptr := v_0 mem := v_1 - if !(t.Alignment()%4 == 0) { - break - } v.reset(OpPPC64MOVDstorezero) v.AddArg2(destptr, mem) return true } - // match: (Zero [8] destptr mem) - // result: (MOVWstorezero [4] destptr (MOVWstorezero [0] destptr mem)) - for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - destptr := v_0 - mem := v_1 - v.reset(OpPPC64MOVWstorezero) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg2(destptr, mem) - v.AddArg2(destptr, v0) - return 
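The OR rule above, like the MOVBstore rule earlier, loses its i0%4 == 0 requirement: four shifted byte loads layered over a word load now merge into a single MOVDload even when the base offset is not word-aligned. A model of the equivalence the little-endian variant relies on, written as ordinary Go rather than compiler code:

package main

import (
	"encoding/binary"
	"fmt"
)

// mergeLE shows why the rewrite is sound on a little-endian target: the
// zero-extended word load of b[0:4] plus the four shifted byte loads of
// b[4:8] reconstruct exactly the value of one 8-byte load.
func mergeLE(b []byte) uint64 {
	lo := uint64(binary.LittleEndian.Uint32(b[:4])) // the MOVWZload
	return lo |
		uint64(b[4])<<32 | uint64(b[5])<<40 | // the SLDconst'd MOVBZloads
		uint64(b[6])<<48 | uint64(b[7])<<56
}

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	fmt.Println(mergeLE(b) == binary.LittleEndian.Uint64(b)) // true
}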
true - } // match: (Zero [12] {t} destptr mem) - // cond: t.Alignment()%4 == 0 // result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 12 { break } - t := auxToType(v.Aux) destptr := v_0 mem := v_1 - if !(t.Alignment()%4 == 0) { - break - } v.reset(OpPPC64MOVWstorezero) v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) @@ -16898,18 +16849,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { return true } // match: (Zero [16] {t} destptr mem) - // cond: t.Alignment()%4 == 0 // result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 16 { break } - t := auxToType(v.Aux) destptr := v_0 mem := v_1 - if !(t.Alignment()%4 == 0) { - break - } v.reset(OpPPC64MOVDstorezero) v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) @@ -16919,18 +16865,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { return true } // match: (Zero [24] {t} destptr mem) - // cond: t.Alignment()%4 == 0 // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))) for { if auxIntToInt64(v.AuxInt) != 24 { break } - t := auxToType(v.Aux) destptr := v_0 mem := v_1 - if !(t.Alignment()%4 == 0) { - break - } v.reset(OpPPC64MOVDstorezero) v.AuxInt = int32ToAuxInt(16) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) @@ -16943,18 +16884,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { return true } // match: (Zero [32] {t} destptr mem) - // cond: t.Alignment()%4 == 0 // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))) for { if auxIntToInt64(v.AuxInt) != 32 { break } - t := auxToType(v.Aux) destptr := v_0 mem := v_1 - if !(t.Alignment()%4 == 0) { - break - } v.reset(OpPPC64MOVDstorezero) v.AuxInt = int32ToAuxInt(24) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) @@ -16970,13 +16906,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { return true } // match: (Zero [s] ptr mem) - // cond: objabi.GOPPC64 <= 8 && s < 64 + // cond: buildcfg.GOPPC64 <= 8 && s < 64 // result: (LoweredZeroShort [s] ptr mem) for { s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 - if !(objabi.GOPPC64 <= 8 && s < 64) { + if !(buildcfg.GOPPC64 <= 8 && s < 64) { break } v.reset(OpPPC64LoweredZeroShort) @@ -16985,13 +16921,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { return true } // match: (Zero [s] ptr mem) - // cond: objabi.GOPPC64 <= 8 + // cond: buildcfg.GOPPC64 <= 8 // result: (LoweredZero [s] ptr mem) for { s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 - if !(objabi.GOPPC64 <= 8) { + if !(buildcfg.GOPPC64 <= 8) { break } v.reset(OpPPC64LoweredZero) @@ -17000,13 +16936,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { return true } // match: (Zero [s] ptr mem) - // cond: s < 128 && objabi.GOPPC64 >= 9 + // cond: s < 128 && buildcfg.GOPPC64 >= 9 // result: (LoweredQuadZeroShort [s] ptr mem) for { s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 - if !(s < 128 && objabi.GOPPC64 >= 9) { + if !(s < 128 && buildcfg.GOPPC64 >= 9) { break } v.reset(OpPPC64LoweredQuadZeroShort) @@ -17015,13 +16951,13 @@ func rewriteValuePPC64_OpZero(v *Value) bool { return true } // match: (Zero [s] ptr mem) - // cond: objabi.GOPPC64 >= 9 + // cond: buildcfg.GOPPC64 >= 9 // result: (LoweredQuadZero [s] ptr mem) for { s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 - if !(objabi.GOPPC64 >= 9) { + if !(buildcfg.GOPPC64 >= 9) { break } 
v.reset(OpPPC64LoweredQuadZero) diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index fb507b65c4c..431fb1aaf66 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -52,6 +52,11 @@ func rewriteValueRISCV64(v *Value) bool { case OpAtomicAdd64: v.Op = OpRISCV64LoweredAtomicAdd64 return true + case OpAtomicAnd32: + v.Op = OpRISCV64LoweredAtomicAnd32 + return true + case OpAtomicAnd8: + return rewriteValueRISCV64_OpAtomicAnd8(v) case OpAtomicCompareAndSwap32: v.Op = OpRISCV64LoweredAtomicCas32 return true @@ -76,6 +81,11 @@ func rewriteValueRISCV64(v *Value) bool { case OpAtomicLoadPtr: v.Op = OpRISCV64LoweredAtomicLoad64 return true + case OpAtomicOr32: + v.Op = OpRISCV64LoweredAtomicOr32 + return true + case OpAtomicOr8: + return rewriteValueRISCV64_OpAtomicOr8(v) case OpAtomicStore32: v.Op = OpRISCV64LoweredAtomicStore32 return true @@ -106,21 +116,17 @@ func rewriteValueRISCV64(v *Value) bool { v.Op = OpRISCV64NOT return true case OpConst16: - v.Op = OpRISCV64MOVHconst - return true + return rewriteValueRISCV64_OpConst16(v) case OpConst32: - v.Op = OpRISCV64MOVWconst - return true + return rewriteValueRISCV64_OpConst32(v) case OpConst32F: return rewriteValueRISCV64_OpConst32F(v) case OpConst64: - v.Op = OpRISCV64MOVDconst - return true + return rewriteValueRISCV64_OpConst64(v) case OpConst64F: return rewriteValueRISCV64_OpConst64F(v) case OpConst8: - v.Op = OpRISCV64MOVBconst - return true + return rewriteValueRISCV64_OpConst8(v) case OpConstBool: return rewriteValueRISCV64_OpConstBool(v) case OpConstNil: @@ -229,7 +235,8 @@ func rewriteValueRISCV64(v *Value) bool { v.Op = OpLess64U return true case OpIsNonNil: - return rewriteValueRISCV64_OpIsNonNil(v) + v.Op = OpRISCV64SNEZ + return true case OpIsSliceInBounds: v.Op = OpLeq64U return true @@ -431,10 +438,10 @@ func rewriteValueRISCV64(v *Value) bool { return rewriteValueRISCV64_OpRISCV64MOVBstore(v) case OpRISCV64MOVBstorezero: return rewriteValueRISCV64_OpRISCV64MOVBstorezero(v) - case OpRISCV64MOVDconst: - return rewriteValueRISCV64_OpRISCV64MOVDconst(v) case OpRISCV64MOVDload: return rewriteValueRISCV64_OpRISCV64MOVDload(v) + case OpRISCV64MOVDnop: + return rewriteValueRISCV64_OpRISCV64MOVDnop(v) case OpRISCV64MOVDreg: return rewriteValueRISCV64_OpRISCV64MOVDreg(v) case OpRISCV64MOVDstore: @@ -580,6 +587,9 @@ func rewriteValueRISCV64(v *Value) bool { case OpSqrt: v.Op = OpRISCV64FSQRTD return true + case OpSqrt32: + v.Op = OpRISCV64FSQRTS + return true case OpStaticCall: v.Op = OpRISCV64CALLstatic return true @@ -676,6 +686,71 @@ func rewriteValueRISCV64_OpAddr(v *Value) bool { return true } } +func rewriteValueRISCV64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAnd8 ptr val mem) + // result: (LoweredAtomicAnd32 (ANDI [^3] ptr) (NOT (SLL (XORI [0xff] (ZeroExt8to32 val)) (SLLI [3] (ANDI [3] ptr)))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpRISCV64LoweredAtomicAnd32) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr) + v0.AuxInt = int64ToAuxInt(^3) + v0.AddArg(ptr) + v1 := b.NewValue0(v.Pos, OpRISCV64NOT, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpRISCV64XORI, typ.UInt32) + v3.AuxInt = int64ToAuxInt(0xff) + v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v4.AddArg(val) + v3.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpRISCV64SLLI, 
typ.UInt64) + v5.AuxInt = int64ToAuxInt(3) + v6 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64) + v6.AuxInt = int64ToAuxInt(3) + v6.AddArg(ptr) + v5.AddArg(v6) + v2.AddArg2(v3, v5) + v1.AddArg(v2) + v.AddArg3(v0, v1, mem) + return true + } +} +func rewriteValueRISCV64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicOr8 ptr val mem) + // result: (LoweredAtomicOr32 (ANDI [^3] ptr) (SLL (ZeroExt8to32 val) (SLLI [3] (ANDI [3] ptr))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpRISCV64LoweredAtomicOr32) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr) + v0.AuxInt = int64ToAuxInt(^3) + v0.AddArg(ptr) + v1 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(val) + v3 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v3.AuxInt = int64ToAuxInt(3) + v4 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64) + v4.AuxInt = int64ToAuxInt(3) + v4.AddArg(ptr) + v3.AddArg(v4) + v1.AddArg2(v2, v3) + v.AddArg3(v0, v1, mem) + return true + } +} func rewriteValueRISCV64_OpAvg64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -704,20 +779,50 @@ func rewriteValueRISCV64_OpAvg64u(v *Value) bool { return true } } +func rewriteValueRISCV64_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueRISCV64_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} func rewriteValueRISCV64_OpConst32F(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Const32F [val]) - // result: (FMVSX (MOVWconst [int32(math.Float32bits(val))])) + // result: (FMVSX (MOVDconst [int64(math.Float32bits(val))])) for { val := auxIntToFloat32(v.AuxInt) v.reset(OpRISCV64FMVSX) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(int32(math.Float32bits(val))) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(int64(math.Float32bits(val))) v.AddArg(v0) return true } } +func rewriteValueRISCV64_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} func rewriteValueRISCV64_OpConst64F(v *Value) bool { b := v.Block typ := &b.Func.Config.Types @@ -732,13 +837,23 @@ func rewriteValueRISCV64_OpConst64F(v *Value) bool { return true } } +func rewriteValueRISCV64_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} func rewriteValueRISCV64_OpConstBool(v *Value) bool { // match: (ConstBool [val]) - // result: (MOVBconst [int8(b2i(val))]) + // result: (MOVDconst [int64(b2i(val))]) for { val := auxIntToBool(v.AuxInt) - v.reset(OpRISCV64MOVBconst) - v.AuxInt = int8ToAuxInt(int8(b2i(val))) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(b2i(val))) return true } } @@ -890,14 +1005,19 @@ func rewriteValueRISCV64_OpEq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + typ := 
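The new RISCV64 AtomicAnd8/AtomicOr8 lowerings above implement a byte-wide atomic on top of a 32-bit one: align the pointer down to its word (ANDI [^3] ptr), compute the byte's bit offset within the word (SLLI [3] (ANDI [3] ptr)), and build a 32-bit operand that is neutral in every other lane — ones for AND, zeros for OR. A little-endian model of the AND case; the CAS loop here merely stands in for the AMO instruction behind LoweredAtomicAnd32, since sync/atomic offers no 32-bit AND:

package main

import (
	"fmt"
	"sync/atomic"
)

// and8Lane atomically ANDs val into byte lane `lane` of a 4-byte word,
// mirroring the rewrite: XORI [0xff] computes ^val, SLL moves it to the
// right lane, and NOT turns it into a mask that is all ones elsewhere.
func and8Lane(word *uint32, lane uint, val uint8) {
	shift := lane * 8                // SLLI [3] (ANDI [3] ptr)
	mask := ^(uint32(^val) << shift) // NOT (SLL (XORI [0xff] ...) ...)
	for {
		old := atomic.LoadUint32(word)
		if atomic.CompareAndSwapUint32(word, old, old&mask) { // LoweredAtomicAnd32
			return
		}
	}
}

func main() {
	var w uint32 = 0xffffffff
	and8Lane(&w, 1, 0x0f)    // AND 0x0f into byte 1
	fmt.Printf("%#08x\n", w) // 0xffff0fff
}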
&b.Func.Config.Types // match: (Eq32 x y) - // result: (SEQZ (SUBW x y)) + // result: (SEQZ (SUB (ZeroExt32to64 x) (ZeroExt32to64 y))) for { x := v_0 y := v_1 v.reset(OpRISCV64SEQZ) - v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -1016,21 +1136,6 @@ func rewriteValueRISCV64_OpHmul32u(v *Value) bool { return true } } -func rewriteValueRISCV64_OpIsNonNil(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNonNil p) - // result: (NeqPtr (MOVDconst [0]) p) - for { - p := v_0 - v.reset(OpNeqPtr) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v.AddArg2(v0, p) - return true - } -} func rewriteValueRISCV64_OpLeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -2466,14 +2571,19 @@ func rewriteValueRISCV64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + typ := &b.Func.Config.Types // match: (Neq32 x y) - // result: (SNEZ (SUBW x y)) + // result: (SNEZ (SUB (ZeroExt32to64 x) (ZeroExt32to64 y))) for { x := v_0 y := v_1 v.reset(OpRISCV64SNEZ) - v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } @@ -2632,54 +2742,6 @@ func rewriteValueRISCV64_OpPanicBounds(v *Value) bool { func rewriteValueRISCV64_OpRISCV64ADD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ADD (MOVBconst [val]) x) - // result: (ADDI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVBconst { - continue - } - val := auxIntToInt8(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ADDI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } - // match: (ADD (MOVHconst [val]) x) - // result: (ADDI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVHconst { - continue - } - val := auxIntToInt16(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ADDI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } - // match: (ADD (MOVWconst [val]) x) - // result: (ADDI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVWconst { - continue - } - val := auxIntToInt32(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ADDI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } // match: (ADD (MOVDconst [val]) x) // cond: is32Bit(val) // result: (ADDI [val] x) @@ -2739,54 +2801,6 @@ func rewriteValueRISCV64_OpRISCV64ADDI(v *Value) bool { func rewriteValueRISCV64_OpRISCV64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AND (MOVBconst [val]) x) - // result: (ANDI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVBconst { - continue - } - val := auxIntToInt8(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ANDI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } - // match: (AND (MOVHconst [val]) x) - // result: (ANDI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, 
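Eq32 and Neq32 above now compare via a full 64-bit SUB of zero-extended operands instead of SUBW, so the SEQZ/SNEZ result no longer depends on whatever happens to occupy the upper 32 bits of the input registers. The equivalent Go, as a sketch:

package main

import "fmt"

// eq32 models (SEQZ (SUB (ZeroExt32to64 x) (ZeroExt32to64 y))): after
// zero-extension the 64-bit difference is zero exactly when the low 32
// bits agree, regardless of the upper halves.
func eq32(x, y uint64) bool {
	return uint64(uint32(x))-uint64(uint32(y)) == 0
}

func main() {
	// Same low word, different garbage above it: equal as 32-bit values.
	fmt.Println(eq32(0xdeadbeef00000007, 0x0000000000000007)) // true
}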
v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVHconst { - continue - } - val := auxIntToInt16(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ANDI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } - // match: (AND (MOVWconst [val]) x) - // result: (ANDI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVWconst { - continue - } - val := auxIntToInt32(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ANDI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } // match: (AND (MOVDconst [val]) x) // cond: is32Bit(val) // result: (ANDI [val] x) @@ -2860,13 +2874,13 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool { func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MOVBUreg (MOVBconst [c])) + // match: (MOVBUreg (MOVDconst [c])) // result: (MOVDconst [int64(uint8(c))]) for { - if v_0.Op != OpRISCV64MOVBconst { + if v_0.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt8(v_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) v.reset(OpRISCV64MOVDconst) v.AuxInt = int64ToAuxInt(int64(uint8(c))) return true @@ -2970,15 +2984,15 @@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool { func rewriteValueRISCV64_OpRISCV64MOVBreg(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MOVBreg (MOVBconst [c])) - // result: (MOVDconst [int64(c)]) + // match: (MOVBreg (MOVDconst [c])) + // result: (MOVDconst [int64(int8(c))]) for { - if v_0.Op != OpRISCV64MOVBconst { + if v_0.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt8(v_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(int64(c)) + v.AuxInt = int64ToAuxInt(int64(int8(c))) return true } // match: (MOVBreg x:(MOVBload _ _)) @@ -3078,13 +3092,13 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) + // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) // result: (MOVBstorezero [off] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpRISCV64MOVBconst || auxIntToInt8(v_1.AuxInt) != 0 { + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 @@ -3246,51 +3260,6 @@ func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool { } return false } -func rewriteValueRISCV64_OpRISCV64MOVDconst(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (MOVDconst [c]) - // cond: !is32Bit(c) && int32(c) < 0 - // result: (ADD (SLLI [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))])) - for { - t := v.Type - c := auxIntToInt64(v.AuxInt) - if !(!is32Bit(c) && int32(c) < 0) { - break - } - v.reset(OpRISCV64ADD) - v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) - v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v1.AuxInt = int64ToAuxInt(c>>32 + 1) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(int32(c))) - v.AddArg2(v0, v2) - return true - } - // match: (MOVDconst [c]) - // cond: !is32Bit(c) && int32(c) >= 0 - // result: (ADD (SLLI [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))])) - for { - t := v.Type - c := auxIntToInt64(v.AuxInt) - if !(!is32Bit(c) && int32(c) >= 0) { - break - } - v.reset(OpRISCV64ADD) - v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) - v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, 
OpRISCV64MOVDconst, typ.UInt64) - v1.AuxInt = int64ToAuxInt(c>>32 + 0) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v2.AuxInt = int64ToAuxInt(int64(int32(c))) - v.AddArg2(v0, v2) - return true - } - return false -} func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -3339,6 +3308,21 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool { } return false } +func rewriteValueRISCV64_OpRISCV64MOVDnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVDnop (MOVDconst [c])) + // result: (MOVDconst [c]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} func rewriteValueRISCV64_OpRISCV64MOVDreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVDreg x) @@ -3521,24 +3505,13 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool { func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MOVHUreg (MOVBconst [c])) + // match: (MOVHUreg (MOVDconst [c])) // result: (MOVDconst [int64(uint16(c))]) for { - if v_0.Op != OpRISCV64MOVBconst { + if v_0.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt8(v_0.AuxInt) - v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(int64(uint16(c))) - return true - } - // match: (MOVHUreg (MOVHconst [c])) - // result: (MOVDconst [int64(uint16(c))]) - for { - if v_0.Op != OpRISCV64MOVHconst { - break - } - c := auxIntToInt16(v_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) v.reset(OpRISCV64MOVDconst) v.AuxInt = int64ToAuxInt(int64(uint16(c))) return true @@ -3664,26 +3637,15 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool { func rewriteValueRISCV64_OpRISCV64MOVHreg(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MOVHreg (MOVBconst [c])) - // result: (MOVDconst [int64(c)]) + // match: (MOVHreg (MOVDconst [c])) + // result: (MOVDconst [int64(int16(c))]) for { - if v_0.Op != OpRISCV64MOVBconst { + if v_0.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt8(v_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(int64(c)) - return true - } - // match: (MOVHreg (MOVHconst [c])) - // result: (MOVDconst [int64(c)]) - for { - if v_0.Op != OpRISCV64MOVHconst { - break - } - c := auxIntToInt16(v_0.AuxInt) - v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(int64(c)) + v.AuxInt = int64ToAuxInt(int64(int16(c))) return true } // match: (MOVHreg x:(MOVBload _ _)) @@ -3827,13 +3789,13 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) + // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) // result: (MOVHstorezero [off] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpRISCV64MOVHconst || auxIntToInt16(v_1.AuxInt) != 0 { + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 @@ -4012,35 +3974,13 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool { func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MOVWUreg (MOVBconst [c])) + // match: (MOVWUreg (MOVDconst [c])) // result: (MOVDconst [int64(uint32(c))]) for { - if v_0.Op != OpRISCV64MOVBconst { + if v_0.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt8(v_0.AuxInt) - v.reset(OpRISCV64MOVDconst) - v.AuxInt = 
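A large share of this RISCV64 diff is one consolidation: MOVBconst, MOVHconst, and MOVWconst disappear, every constant is materialized as a 64-bit MOVDconst, and the old rules that decomposed an out-of-range MOVDconst into SLLI/ADD pairs go with them (constant synthesis presumably happens later, outside the rewrite pass). The extension ops then fold a MOVDconst by truncating and re-extending its payload, which is plain integer arithmetic:

package main

import "fmt"

// movbReg and movbuReg model the folds
//   (MOVBreg  (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
//   (MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
// i.e. sign- versus zero-extension of the constant's low byte.
func movbReg(c int64) int64  { return int64(int8(c)) }
func movbuReg(c int64) int64 { return int64(uint8(c)) }

func main() {
	fmt.Println(movbReg(0x1ff), movbuReg(0x1ff)) // -1 255
}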
int64ToAuxInt(int64(uint32(c))) - return true - } - // match: (MOVWUreg (MOVHconst [c])) - // result: (MOVDconst [int64(uint32(c))]) - for { - if v_0.Op != OpRISCV64MOVHconst { - break - } - c := auxIntToInt16(v_0.AuxInt) - v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(int64(uint32(c))) - return true - } - // match: (MOVWUreg (MOVWconst [c])) - // result: (MOVDconst [int64(uint32(c))]) - for { - if v_0.Op != OpRISCV64MOVWconst { - break - } - c := auxIntToInt32(v_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) v.reset(OpRISCV64MOVDconst) v.AuxInt = int64ToAuxInt(int64(uint32(c))) return true @@ -4188,37 +4128,15 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool { func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MOVWreg (MOVBconst [c])) - // result: (MOVDconst [int64(c)]) + // match: (MOVWreg (MOVDconst [c])) + // result: (MOVDconst [int64(int32(c))]) for { - if v_0.Op != OpRISCV64MOVBconst { + if v_0.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt8(v_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(int64(c)) - return true - } - // match: (MOVWreg (MOVHconst [c])) - // result: (MOVDconst [int64(c)]) - for { - if v_0.Op != OpRISCV64MOVHconst { - break - } - c := auxIntToInt16(v_0.AuxInt) - v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(int64(c)) - return true - } - // match: (MOVWreg (MOVWconst [c])) - // result: (MOVDconst [int64(c)]) - for { - if v_0.Op != OpRISCV64MOVWconst { - break - } - c := auxIntToInt32(v_0.AuxInt) - v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(int64(c)) + v.AuxInt = int64ToAuxInt(int64(int32(c))) return true } // match: (MOVWreg x:(MOVBload _ _)) @@ -4395,13 +4313,13 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool { v.AddArg3(base, val, mem) return true } - // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) + // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) // result: (MOVWstorezero [off] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 @@ -4498,54 +4416,6 @@ func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool { func rewriteValueRISCV64_OpRISCV64OR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OR (MOVBconst [val]) x) - // result: (ORI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVBconst { - continue - } - val := auxIntToInt8(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ORI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } - // match: (OR (MOVHconst [val]) x) - // result: (ORI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVHconst { - continue - } - val := auxIntToInt16(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ORI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } - // match: (OR (MOVWconst [val]) x) - // result: (ORI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVWconst { - continue - } - val := auxIntToInt32(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64ORI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } // match: (OR (MOVDconst [val]) x) // cond: is32Bit(val) // result: (ORI [val] x) @@ 
-4571,45 +4441,6 @@ func rewriteValueRISCV64_OpRISCV64OR(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SLL x (MOVBconst [val])) - // result: (SLLI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVBconst { - break - } - val := auxIntToInt8(v_1.AuxInt) - v.reset(OpRISCV64SLLI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } - // match: (SLL x (MOVHconst [val])) - // result: (SLLI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVHconst { - break - } - val := auxIntToInt16(v_1.AuxInt) - v.reset(OpRISCV64SLLI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } - // match: (SLL x (MOVWconst [val])) - // result: (SLLI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVWconst { - break - } - val := auxIntToInt32(v_1.AuxInt) - v.reset(OpRISCV64SLLI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } // match: (SLL x (MOVDconst [val])) // result: (SLLI [int64(val&63)] x) for { @@ -4628,45 +4459,6 @@ func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SRA x (MOVBconst [val])) - // result: (SRAI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVBconst { - break - } - val := auxIntToInt8(v_1.AuxInt) - v.reset(OpRISCV64SRAI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } - // match: (SRA x (MOVHconst [val])) - // result: (SRAI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVHconst { - break - } - val := auxIntToInt16(v_1.AuxInt) - v.reset(OpRISCV64SRAI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } - // match: (SRA x (MOVWconst [val])) - // result: (SRAI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVWconst { - break - } - val := auxIntToInt32(v_1.AuxInt) - v.reset(OpRISCV64SRAI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } // match: (SRA x (MOVDconst [val])) // result: (SRAI [int64(val&63)] x) for { @@ -4685,45 +4477,6 @@ func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SRL x (MOVBconst [val])) - // result: (SRLI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVBconst { - break - } - val := auxIntToInt8(v_1.AuxInt) - v.reset(OpRISCV64SRLI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } - // match: (SRL x (MOVHconst [val])) - // result: (SRLI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVHconst { - break - } - val := auxIntToInt16(v_1.AuxInt) - v.reset(OpRISCV64SRLI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } - // match: (SRL x (MOVWconst [val])) - // result: (SRLI [int64(val&63)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVWconst { - break - } - val := auxIntToInt32(v_1.AuxInt) - v.reset(OpRISCV64SRLI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) - v.AddArg(x) - return true - } // match: (SRL x (MOVDconst [val])) // result: (SRLI [int64(val&63)] x) for { @@ -4742,49 +4495,6 @@ func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SUB x (MOVBconst [val])) - // result: (ADDI [-int64(val)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVBconst { - break - } - val 
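The shift rules shrink the same way: with only MOVDconst left, a single rule per shift suffices, e.g. (SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x). Masking with 63 matches RISC-V semantics, where the register shifts consult only the low six bits of the count, so folding the mask into the immediate preserves behavior:

package main

import "fmt"

// sllConst models the fold: the immediate form encodes a 6-bit shamt, and
// val&63 is exactly the count SLL would have used anyway.
func sllConst(x uint64, val int64) uint64 {
	return x << uint(val&63)
}

func main() {
	fmt.Println(sllConst(1, 67) == 1<<3) // true: 67&63 == 3
}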
:= auxIntToInt8(v_1.AuxInt) - v.reset(OpRISCV64ADDI) - v.AuxInt = int64ToAuxInt(-int64(val)) - v.AddArg(x) - return true - } - // match: (SUB x (MOVHconst [val])) - // result: (ADDI [-int64(val)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVHconst { - break - } - val := auxIntToInt16(v_1.AuxInt) - v.reset(OpRISCV64ADDI) - v.AuxInt = int64ToAuxInt(-int64(val)) - v.AddArg(x) - return true - } - // match: (SUB x (MOVWconst [val])) - // cond: is32Bit(-int64(val)) - // result: (ADDI [-int64(val)] x) - for { - x := v_0 - if v_1.Op != OpRISCV64MOVWconst { - break - } - val := auxIntToInt32(v_1.AuxInt) - if !(is32Bit(-int64(val))) { - break - } - v.reset(OpRISCV64ADDI) - v.AuxInt = int64ToAuxInt(-int64(val)) - v.AddArg(x) - return true - } // match: (SUB x (MOVDconst [val])) // cond: is32Bit(-val) // result: (ADDI [-val] x) @@ -4802,36 +4512,6 @@ func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool { v.AddArg(x) return true } - // match: (SUB x (MOVBconst [0])) - // result: x - for { - x := v_0 - if v_1.Op != OpRISCV64MOVBconst || auxIntToInt8(v_1.AuxInt) != 0 { - break - } - v.copyOf(x) - return true - } - // match: (SUB x (MOVHconst [0])) - // result: x - for { - x := v_0 - if v_1.Op != OpRISCV64MOVHconst || auxIntToInt16(v_1.AuxInt) != 0 { - break - } - v.copyOf(x) - return true - } - // match: (SUB x (MOVWconst [0])) - // result: x - for { - x := v_0 - if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { - break - } - v.copyOf(x) - return true - } // match: (SUB x (MOVDconst [0])) // result: x for { @@ -4842,39 +4522,6 @@ func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool { v.copyOf(x) return true } - // match: (SUB (MOVBconst [0]) x) - // result: (NEG x) - for { - if v_0.Op != OpRISCV64MOVBconst || auxIntToInt8(v_0.AuxInt) != 0 { - break - } - x := v_1 - v.reset(OpRISCV64NEG) - v.AddArg(x) - return true - } - // match: (SUB (MOVHconst [0]) x) - // result: (NEG x) - for { - if v_0.Op != OpRISCV64MOVHconst || auxIntToInt16(v_0.AuxInt) != 0 { - break - } - x := v_1 - v.reset(OpRISCV64NEG) - v.AddArg(x) - return true - } - // match: (SUB (MOVWconst [0]) x) - // result: (NEG x) - for { - if v_0.Op != OpRISCV64MOVWconst || auxIntToInt32(v_0.AuxInt) != 0 { - break - } - x := v_1 - v.reset(OpRISCV64NEG) - v.AddArg(x) - return true - } // match: (SUB (MOVDconst [0]) x) // result: (NEG x) for { @@ -4891,11 +4538,11 @@ func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SUBW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SUBW x (MOVWconst [0])) + // match: (SUBW x (MOVDconst [0])) // result: (ADDIW [0] x) for { x := v_0 - if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } v.reset(OpRISCV64ADDIW) @@ -4919,54 +4566,6 @@ func rewriteValueRISCV64_OpRISCV64SUBW(v *Value) bool { func rewriteValueRISCV64_OpRISCV64XOR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XOR (MOVBconst [val]) x) - // result: (XORI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVBconst { - continue - } - val := auxIntToInt8(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64XORI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } - // match: (XOR (MOVHconst [val]) x) - // result: (XORI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVHconst { - continue - } - val := auxIntToInt16(v_0.AuxInt) - x := v_1 - 
v.reset(OpRISCV64XORI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } - // match: (XOR (MOVWconst [val]) x) - // result: (XORI [int64(val)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpRISCV64MOVWconst { - continue - } - val := auxIntToInt32(v_0.AuxInt) - x := v_1 - v.reset(OpRISCV64XORI) - v.AuxInt = int64ToAuxInt(int64(val)) - v.AddArg(x) - return true - } - break - } // match: (XOR (MOVDconst [val]) x) // cond: is32Bit(val) // result: (XORI [val] x) @@ -4994,23 +4593,23 @@ func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (RotateLeft16 x (MOVHconst [c])) - // result: (Or16 (Lsh16x64 x (MOVHconst [c&15])) (Rsh16Ux64 x (MOVHconst [-c&15]))) + // match: (RotateLeft16 x (MOVDconst [c])) + // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) for { t := v.Type x := v_0 - if v_1.Op != OpRISCV64MOVHconst { + if v_1.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt16(v_1.AuxInt) + c := auxIntToInt64(v_1.AuxInt) v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v1 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v1.AuxInt = int16ToAuxInt(c & 15) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 15) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v3 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v3.AuxInt = int16ToAuxInt(-c & 15) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 15) v2.AddArg2(x, v3) v.AddArg2(v0, v2) return true @@ -5022,23 +4621,23 @@ func rewriteValueRISCV64_OpRotateLeft32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (RotateLeft32 x (MOVWconst [c])) - // result: (Or32 (Lsh32x64 x (MOVWconst [c&31])) (Rsh32Ux64 x (MOVWconst [-c&31]))) + // match: (RotateLeft32 x (MOVDconst [c])) + // result: (Or32 (Lsh32x64 x (MOVDconst [c&31])) (Rsh32Ux64 x (MOVDconst [-c&31]))) for { t := v.Type x := v_0 - if v_1.Op != OpRISCV64MOVWconst { + if v_1.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt32(v_1.AuxInt) + c := auxIntToInt64(v_1.AuxInt) v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v1 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v1.AuxInt = int32ToAuxInt(c & 31) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 31) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) - v3 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v3.AuxInt = int32ToAuxInt(-c & 31) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 31) v2.AddArg2(x, v3) v.AddArg2(v0, v2) return true @@ -5078,23 +4677,23 @@ func rewriteValueRISCV64_OpRotateLeft8(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (RotateLeft8 x (MOVBconst [c])) - // result: (Or8 (Lsh8x64 x (MOVBconst [c&7])) (Rsh8Ux64 x (MOVBconst [-c&7]))) + // match: (RotateLeft8 x (MOVDconst [c])) + // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) for { t := v.Type x := v_0 - if v_1.Op != OpRISCV64MOVBconst { + if v_1.Op != OpRISCV64MOVDconst { break } - c := auxIntToInt8(v_1.AuxInt) + c := auxIntToInt64(v_1.AuxInt) v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v1 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v1.AuxInt = int8ToAuxInt(c & 7) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = 
int64ToAuxInt(c & 7) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v3 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v3.AuxInt = int8ToAuxInt(-c & 7) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 7) v2.AddArg2(x, v3) v.AddArg2(v0, v2) return true @@ -6095,7 +5694,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { return true } // match: (Zero [1] ptr mem) - // result: (MOVBstore ptr (MOVBconst [0]) mem) + // result: (MOVBstore ptr (MOVDconst [0]) mem) for { if auxIntToInt64(v.AuxInt) != 1 { break @@ -6103,14 +5702,14 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { ptr := v_0 mem := v_1 v.reset(OpRISCV64MOVBstore) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) // cond: t.Alignment()%2 == 0 - // result: (MOVHstore ptr (MOVHconst [0]) mem) + // result: (MOVHstore ptr (MOVDconst [0]) mem) for { if auxIntToInt64(v.AuxInt) != 2 { break @@ -6122,13 +5721,13 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { break } v.reset(OpRISCV64MOVHstore) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v0.AuxInt = int16ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) - // result: (MOVBstore [1] ptr (MOVBconst [0]) (MOVBstore ptr (MOVBconst [0]) mem)) + // result: (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 2 { break @@ -6137,8 +5736,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { mem := v_1 v.reset(OpRISCV64MOVBstore) v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) @@ -6146,7 +5745,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } // match: (Zero [4] {t} ptr mem) // cond: t.Alignment()%4 == 0 - // result: (MOVWstore ptr (MOVWconst [0]) mem) + // result: (MOVWstore ptr (MOVDconst [0]) mem) for { if auxIntToInt64(v.AuxInt) != 4 { break @@ -6158,14 +5757,14 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { break } v.reset(OpRISCV64MOVWstore) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) // cond: t.Alignment()%2 == 0 - // result: (MOVHstore [2] ptr (MOVHconst [0]) (MOVHstore ptr (MOVHconst [0]) mem)) + // result: (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 4 { break @@ -6178,15 +5777,15 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } v.reset(OpRISCV64MOVHstore) v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v0.AuxInt = int16ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] ptr mem) - // result: (MOVBstore [3] ptr (MOVBconst [0]) (MOVBstore [2] ptr (MOVBconst [0]) 
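RotateLeft16/32/8 keep their shift-pair expansions — this RISCV64 port has no rotate instruction — with only the constant op changing to MOVDconst. The masks keep both shift amounts within the operand width; in Go terms, for the 16-bit case:

package main

import "fmt"

// rotl16 models (Or16 (Lsh16x64 x [c&15]) (Rsh16Ux64 x [-c&15])); -c&15 is
// the complementary count, and c == 0 degenerates harmlessly to x|x.
func rotl16(x uint16, c int64) uint16 {
	return x<<(uint64(c)&15) | x>>(uint64(-c)&15)
}

func main() {
	fmt.Printf("%#04x\n", rotl16(0x1234, 4)) // 0x2341
}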
(MOVBstore [1] ptr (MOVBconst [0]) (MOVBstore ptr (MOVBconst [0]) mem)))) + // result: (MOVBstore [3] ptr (MOVDconst [0]) (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem)))) for { if auxIntToInt64(v.AuxInt) != 4 { break @@ -6195,8 +5794,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { mem := v_1 v.reset(OpRISCV64MOVBstore) v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) v1.AuxInt = int32ToAuxInt(2) v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) @@ -6229,7 +5828,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } // match: (Zero [8] {t} ptr mem) // cond: t.Alignment()%4 == 0 - // result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore ptr (MOVWconst [0]) mem)) + // result: (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 8 { break @@ -6242,8 +5841,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } v.reset(OpRISCV64MOVWstore) v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) @@ -6251,7 +5850,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } // match: (Zero [8] {t} ptr mem) // cond: t.Alignment()%2 == 0 - // result: (MOVHstore [6] ptr (MOVHconst [0]) (MOVHstore [4] ptr (MOVHconst [0]) (MOVHstore [2] ptr (MOVHconst [0]) (MOVHstore ptr (MOVHconst [0]) mem)))) + // result: (MOVHstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)))) for { if auxIntToInt64(v.AuxInt) != 8 { break @@ -6264,8 +5863,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } v.reset(OpRISCV64MOVHstore) v.AuxInt = int32ToAuxInt(6) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v0.AuxInt = int16ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) @@ -6278,7 +5877,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { return true } // match: (Zero [3] ptr mem) - // result: (MOVBstore [2] ptr (MOVBconst [0]) (MOVBstore [1] ptr (MOVBconst [0]) (MOVBstore ptr (MOVBconst [0]) mem))) + // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))) for { if auxIntToInt64(v.AuxInt) != 3 { break @@ -6287,8 +5886,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { mem := v_1 v.reset(OpRISCV64MOVBstore) v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) v1.AuxInt = int32ToAuxInt(1) v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) @@ -6299,7 +5898,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } // match: (Zero [6] {t} ptr mem) // cond: t.Alignment()%2 == 0 - // result: (MOVHstore [4] ptr (MOVHconst [0]) (MOVHstore [2] ptr (MOVHconst [0]) 
(MOVHstore ptr (MOVHconst [0]) mem))) + // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))) for { if auxIntToInt64(v.AuxInt) != 6 { break @@ -6312,8 +5911,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } v.reset(OpRISCV64MOVHstore) v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) - v0.AuxInt = int16ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) v1.AuxInt = int32ToAuxInt(2) v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) @@ -6324,7 +5923,7 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } // match: (Zero [12] {t} ptr mem) // cond: t.Alignment()%4 == 0 - // result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore ptr (MOVWconst [0]) mem))) + // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))) for { if auxIntToInt64(v.AuxInt) != 12 { break @@ -6337,8 +5936,8 @@ func rewriteValueRISCV64_OpZero(v *Value) bool { } v.reset(OpRISCV64MOVWstore) v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index b52a1b6745e..8b41d62c315 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -792,6 +792,9 @@ func rewriteValueS390X(v *Value) bool { case OpSqrt: v.Op = OpS390XFSQRT return true + case OpSqrt32: + v.Op = OpS390XFSQRTS + return true case OpStaticCall: v.Op = OpS390XCALLstatic return true @@ -1318,12 +1321,12 @@ func rewriteValueS390X_OpConst8(v *Value) bool { } } func rewriteValueS390X_OpConstBool(v *Value) bool { - // match: (ConstBool [b]) - // result: (MOVDconst [b2i(b)]) + // match: (ConstBool [t]) + // result: (MOVDconst [b2i(t)]) for { - b := auxIntToBool(v.AuxInt) + t := auxIntToBool(v.AuxInt) v.reset(OpS390XMOVDconst) - v.AuxInt = int64ToAuxInt(b2i(b)) + v.AuxInt = int64ToAuxInt(b2i(t)) return true } } @@ -3430,7 +3433,7 @@ func rewriteValueS390X_OpMove(v *Value) bool { } // match: (Move [s] dst src mem) // cond: s > 0 && s <= 256 && logLargeCopy(v, s) - // result: (MVC [makeValAndOff32(int32(s), 0)] dst src mem) + // result: (MVC [makeValAndOff(int32(s), 0)] dst src mem) for { s := auxIntToInt64(v.AuxInt) dst := v_0 @@ -3440,13 +3443,13 @@ func rewriteValueS390X_OpMove(v *Value) bool { break } v.reset(OpS390XMVC) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s), 0)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), 0)) v.AddArg3(dst, src, mem) return true } // match: (Move [s] dst src mem) // cond: s > 256 && s <= 512 && logLargeCopy(v, s) - // result: (MVC [makeValAndOff32(int32(s)-256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem)) + // result: (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)) for { s := auxIntToInt64(v.AuxInt) dst := v_0 @@ -3456,16 +3459,16 @@ func rewriteValueS390X_OpMove(v *Value) bool { break } v.reset(OpS390XMVC) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s)-256, 256)) + v.AuxInt = 
valAndOffToAuxInt(makeValAndOff(int32(s)-256, 256)) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 0)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0)) v0.AddArg3(dst, src, mem) v.AddArg3(dst, src, v0) return true } // match: (Move [s] dst src mem) // cond: s > 512 && s <= 768 && logLargeCopy(v, s) - // result: (MVC [makeValAndOff32(int32(s)-512, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem))) + // result: (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))) for { s := auxIntToInt64(v.AuxInt) dst := v_0 @@ -3475,11 +3478,11 @@ func rewriteValueS390X_OpMove(v *Value) bool { break } v.reset(OpS390XMVC) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s)-512, 512)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-512, 512)) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 256)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 256)) v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) - v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 0)) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0)) v1.AddArg3(dst, src, mem) v0.AddArg3(dst, src, v1) v.AddArg3(dst, src, v0) @@ -3487,7 +3490,7 @@ func rewriteValueS390X_OpMove(v *Value) bool { } // match: (Move [s] dst src mem) // cond: s > 768 && s <= 1024 && logLargeCopy(v, s) - // result: (MVC [makeValAndOff32(int32(s)-768, 768)] dst src (MVC [makeValAndOff32(256, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem)))) + // result: (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))) for { s := auxIntToInt64(v.AuxInt) dst := v_0 @@ -3497,13 +3500,13 @@ func rewriteValueS390X_OpMove(v *Value) bool { break } v.reset(OpS390XMVC) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s)-768, 768)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-768, 768)) v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) - v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 512)) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 512)) v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) - v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 256)) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 256)) v2 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) - v2.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 0)) + v2.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0)) v2.AddArg3(dst, src, mem) v1.AddArg3(dst, src, v2) v0.AddArg3(dst, src, v1) @@ -8614,7 +8617,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { } // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) // cond: is20Bit(int64(off)) && ptr.Op != OpSB - // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem) + // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -8628,7 +8631,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { break } v.reset(OpS390XMOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -8880,13 +8883,63 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.AddArg3(p, w0, mem) return 
true } + // match: (MOVBstore [7] {s} p1 (SRDconst w) x1:(MOVHBRstore [5] {s} p1 (SRDconst w) x2:(MOVWBRstore [1] {s} p1 (SRDconst w) x3:(MOVBstore [0] {s} p1 w mem)))) + // cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3) + // result: (MOVDBRstore {s} p1 w mem) + for { + if auxIntToInt32(v.AuxInt) != 7 { + break + } + s := auxToSym(v.Aux) + p1 := v_0 + if v_1.Op != OpS390XSRDconst { + break + } + w := v_1.Args[0] + x1 := v_2 + if x1.Op != OpS390XMOVHBRstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s { + break + } + _ = x1.Args[2] + if p1 != x1.Args[0] { + break + } + x1_1 := x1.Args[1] + if x1_1.Op != OpS390XSRDconst || w != x1_1.Args[0] { + break + } + x2 := x1.Args[2] + if x2.Op != OpS390XMOVWBRstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s { + break + } + _ = x2.Args[2] + if p1 != x2.Args[0] { + break + } + x2_1 := x2.Args[1] + if x2_1.Op != OpS390XSRDconst || w != x2_1.Args[0] { + break + } + x3 := x2.Args[2] + if x3.Op != OpS390XMOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s { + break + } + mem := x3.Args[2] + if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) { + break + } + v.reset(OpS390XMOVDBRstore) + v.Aux = symToAux(s) + v.AddArg3(p1, w, mem) + return true + } return false } func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: is20Bit(sc.Off()+int64(off)) + // cond: is20Bit(sc.Off64()+int64(off)) // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) for { sc := auxIntToValAndOff(v.AuxInt) @@ -8897,7 +8950,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is20Bit(sc.Off() + int64(off))) { + if !(is20Bit(sc.Off64() + int64(off))) { break } v.reset(OpS390XMOVBstoreconst) @@ -8930,7 +8983,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) - // result: (MOVHstoreconst [makeValAndOff32(c.Val32()&0xff | a.Val32()<<8, a.Off32())] {s} p mem) + // result: (MOVHstoreconst [makeValAndOff(c.Val()&0xff | a.Val()<<8, a.Off())] {s} p mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -8948,7 +9001,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { break } v.reset(OpS390XMOVHstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c.Val32()&0xff|a.Val32()<<8, a.Off32())) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c.Val()&0xff|a.Val()<<8, a.Off())) v.Aux = symToAux(s) v.AddArg2(p, mem) return true @@ -9160,7 +9213,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { } // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB - // result: (MOVDstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + // result: (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -9174,7 +9227,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { break } v.reset(OpS390XMOVDstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -9290,7 +9343,7 @@ func 
rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: isU12Bit(sc.Off()+int64(off)) + // cond: isU12Bit(sc.Off64()+int64(off)) // result: (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem) for { sc := auxIntToValAndOff(v.AuxInt) @@ -9301,7 +9354,7 @@ func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(isU12Bit(sc.Off() + int64(off))) { + if !(isU12Bit(sc.Off64() + int64(off))) { break } v.reset(OpS390XMOVDstoreconst) @@ -10026,7 +10079,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { } // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) // cond: isU12Bit(int64(off)) && ptr.Op != OpSB - // result: (MOVHstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem) + // result: (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -10040,7 +10093,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { break } v.reset(OpS390XMOVHstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -10191,7 +10244,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: isU12Bit(sc.Off()+int64(off)) + // cond: isU12Bit(sc.Off64()+int64(off)) // result: (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem) for { sc := auxIntToValAndOff(v.AuxInt) @@ -10202,7 +10255,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(isU12Bit(sc.Off() + int64(off))) { + if !(isU12Bit(sc.Off64() + int64(off))) { break } v.reset(OpS390XMOVHstoreconst) @@ -10235,7 +10288,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { } // match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem)) // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) - // result: (MOVWstore [a.Off32()] {s} p (MOVDconst [int64(c.Val32()&0xffff | a.Val32()<<16)]) mem) + // result: (MOVWstore [a.Off()] {s} p (MOVDconst [int64(c.Val()&0xffff | a.Val()<<16)]) mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -10253,10 +10306,10 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { break } v.reset(OpS390XMOVWstore) - v.AuxInt = int32ToAuxInt(a.Off32()) + v.AuxInt = int32ToAuxInt(a.Off()) v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(int64(c.Val32()&0xffff | a.Val32()<<16)) + v0.AuxInt = int64ToAuxInt(int64(c.Val()&0xffff | a.Val()<<16)) v.AddArg3(p, v0, mem) return true } @@ -10864,7 +10917,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { } // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB - // result: (MOVWstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem) + // result: (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) @@ -10878,7 +10931,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { break } v.reset(OpS390XMOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.AuxInt = 
valAndOffToAuxInt(makeValAndOff(int32(c), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true @@ -11052,7 +11105,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: isU12Bit(sc.Off()+int64(off)) + // cond: isU12Bit(sc.Off64()+int64(off)) // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) for { sc := auxIntToValAndOff(v.AuxInt) @@ -11063,7 +11116,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(isU12Bit(sc.Off() + int64(off))) { + if !(isU12Bit(sc.Off64() + int64(off))) { break } v.reset(OpS390XMOVWstoreconst) @@ -11096,7 +11149,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) - // result: (MOVDstore [a.Off32()] {s} p (MOVDconst [c.Val()&0xffffffff | a.Val()<<32]) mem) + // result: (MOVDstore [a.Off()] {s} p (MOVDconst [c.Val64()&0xffffffff | a.Val64()<<32]) mem) for { c := auxIntToValAndOff(v.AuxInt) s := auxToSym(v.Aux) @@ -11114,10 +11167,10 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { break } v.reset(OpS390XMOVDstore) - v.AuxInt = int32ToAuxInt(a.Off32()) + v.AuxInt = int32ToAuxInt(a.Off()) v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(c.Val()&0xffffffff | a.Val()<<32) + v0.AuxInt = int64ToAuxInt(c.Val64()&0xffffffff | a.Val64()<<32) v.AddArg3(p, v0, mem) return true } @@ -15865,7 +15918,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { return true } // match: (Zero [3] destptr mem) - // result: (MOVBstoreconst [makeValAndOff32(0,2)] destptr (MOVHstoreconst [0] destptr mem)) + // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 3 { break @@ -15873,7 +15926,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpS390XMOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 2)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2)) v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, types.TypeMem) v0.AuxInt = valAndOffToAuxInt(0) v0.AddArg2(destptr, mem) @@ -15881,7 +15934,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { return true } // match: (Zero [5] destptr mem) - // result: (MOVBstoreconst [makeValAndOff32(0,4)] destptr (MOVWstoreconst [0] destptr mem)) + // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 5 { break @@ -15889,7 +15942,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpS390XMOVBstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = valAndOffToAuxInt(0) v0.AddArg2(destptr, mem) @@ -15897,7 +15950,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { return true } // match: (Zero [6] destptr mem) - // result: (MOVHstoreconst [makeValAndOff32(0,4)] destptr (MOVWstoreconst [0] destptr mem)) + // result: (MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 6 { break @@ -15905,7 +15958,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { destptr := v_0 mem := v_1 
v.reset(OpS390XMOVHstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = valAndOffToAuxInt(0) v0.AddArg2(destptr, mem) @@ -15913,7 +15966,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { return true } // match: (Zero [7] destptr mem) - // result: (MOVWstoreconst [makeValAndOff32(0,3)] destptr (MOVWstoreconst [0] destptr mem)) + // result: (MOVWstoreconst [makeValAndOff(0,3)] destptr (MOVWstoreconst [0] destptr mem)) for { if auxIntToInt64(v.AuxInt) != 7 { break @@ -15921,7 +15974,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { destptr := v_0 mem := v_1 v.reset(OpS390XMOVWstoreconst) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 3)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3)) v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) v0.AuxInt = valAndOffToAuxInt(0) v0.AddArg2(destptr, mem) @@ -15930,7 +15983,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { } // match: (Zero [s] destptr mem) // cond: s > 0 && s <= 1024 - // result: (CLEAR [makeValAndOff32(int32(s), 0)] destptr mem) + // result: (CLEAR [makeValAndOff(int32(s), 0)] destptr mem) for { s := auxIntToInt64(v.AuxInt) destptr := v_0 @@ -15939,7 +15992,7 @@ func rewriteValueS390X_OpZero(v *Value) bool { break } v.reset(OpS390XCLEAR) - v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s), 0)) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), 0)) v.AddArg2(destptr, mem) return true } diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index c8ecefc7367..5dab09f85b3 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -3,8 +3,8 @@ package ssa +import "internal/buildcfg" import "math" -import "cmd/internal/objabi" import "cmd/compile/internal/types" func rewriteValueWasm(v *Value) bool { @@ -527,6 +527,9 @@ func rewriteValueWasm(v *Value) bool { case OpSqrt: v.Op = OpWasmF64Sqrt return true + case OpSqrt32: + v.Op = OpWasmF32Sqrt + return true case OpStaticCall: v.Op = OpWasmLoweredStaticCall return true @@ -3190,11 +3193,11 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool { return true } // match: (SignExt16to32 x) - // cond: objabi.GOWASM.SignExt + // cond: buildcfg.GOWASM.SignExt // result: (I64Extend16S x) for { x := v_0 - if !(objabi.GOWASM.SignExt) { + if !(buildcfg.GOWASM.SignExt) { break } v.reset(OpWasmI64Extend16S) @@ -3229,11 +3232,11 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool { return true } // match: (SignExt16to64 x) - // cond: objabi.GOWASM.SignExt + // cond: buildcfg.GOWASM.SignExt // result: (I64Extend16S x) for { x := v_0 - if !(objabi.GOWASM.SignExt) { + if !(buildcfg.GOWASM.SignExt) { break } v.reset(OpWasmI64Extend16S) @@ -3268,11 +3271,11 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool { return true } // match: (SignExt32to64 x) - // cond: objabi.GOWASM.SignExt + // cond: buildcfg.GOWASM.SignExt // result: (I64Extend32S x) for { x := v_0 - if !(objabi.GOWASM.SignExt) { + if !(buildcfg.GOWASM.SignExt) { break } v.reset(OpWasmI64Extend32S) @@ -3307,11 +3310,11 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool { return true } // match: (SignExt8to16 x) - // cond: objabi.GOWASM.SignExt + // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(objabi.GOWASM.SignExt) { + if !(buildcfg.GOWASM.SignExt) { break } v.reset(OpWasmI64Extend8S) @@ -3346,11 +3349,11 @@ func 
rewriteValueWasm_OpSignExt8to32(v *Value) bool { return true } // match: (SignExt8to32 x) - // cond: objabi.GOWASM.SignExt + // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(objabi.GOWASM.SignExt) { + if !(buildcfg.GOWASM.SignExt) { break } v.reset(OpWasmI64Extend8S) @@ -3385,11 +3388,11 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool { return true } // match: (SignExt8to64 x) - // cond: objabi.GOWASM.SignExt + // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(objabi.GOWASM.SignExt) { + if !(buildcfg.GOWASM.SignExt) { break } v.reset(OpWasmI64Extend8S) @@ -4899,7 +4902,5 @@ func rewriteValueWasm_OpZeroExt8to64(v *Value) bool { } } func rewriteBlockWasm(b *Block) bool { - switch b.Kind { - } return false } diff --git a/src/cmd/compile/internal/ssa/rewrite_test.go b/src/cmd/compile/internal/ssa/rewrite_test.go index 6fe429e85a6..357fe1183fa 100644 --- a/src/cmd/compile/internal/ssa/rewrite_test.go +++ b/src/cmd/compile/internal/ssa/rewrite_test.go @@ -13,7 +13,7 @@ func TestMove(t *testing.T) { copy(x[1:], x[:]) for i := 1; i < len(x); i++ { if int(x[i]) != i { - t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d insted of %d in position %d", int(x[i]), i, i+1) + t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1) } } } @@ -205,6 +205,7 @@ func TestMergePPC64AndSrwi(t *testing.T) { {0x00000000, 4, false, 0, 0}, {0xF0000000, 4, false, 0, 0}, {0xF0000000, 32, false, 0, 0}, + {0xFFFFFFFF, 0, true, 0, 0xFFFFFFFF}, } for i, v := range tests { result := mergePPC64AndSrwi(v.and, v.srw) diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index e0fa9768d95..2a73a5ddc83 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -23,6 +23,8 @@ func rewriteValuedec(v *Value) bool { return rewriteValuedec_OpSliceLen(v) case OpSlicePtr: return rewriteValuedec_OpSlicePtr(v) + case OpSlicePtrUnchecked: + return rewriteValuedec_OpSlicePtrUnchecked(v) case OpStore: return rewriteValuedec_OpStore(v) case OpStringLen: @@ -248,6 +250,20 @@ func rewriteValuedec_OpSlicePtr(v *Value) bool { } return false } +func rewriteValuedec_OpSlicePtrUnchecked(v *Value) bool { + v_0 := v.Args[0] + // match: (SlicePtrUnchecked (SliceMake ptr _ _ )) + // result: ptr + for { + if v_0.Op != OpSliceMake { + break + } + ptr := v_0.Args[0] + v.copyOf(ptr) + return true + } + return false +} func rewriteValuedec_OpStore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -409,7 +425,5 @@ func rewriteValuedec_OpStringPtr(v *Value) bool { return false } func rewriteBlockdec(b *Block) bool { - switch b.Kind { - } return false } diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 60b727f45fd..7d9656a4c82 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -2458,7 +2458,5 @@ func rewriteValuedec64_OpZeroExt8to64(v *Value) bool { } } func rewriteBlockdec64(b *Block) bool { - switch b.Kind { - } return false } diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 958e24d29f0..52258201ca1 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -122,8 +122,6 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpEqSlice(v) case 
OpIMake: return rewriteValuegeneric_OpIMake(v) - case OpInterCall: - return rewriteValuegeneric_OpInterCall(v) case OpInterLECall: return rewriteValuegeneric_OpInterLECall(v) case OpIsInBounds: @@ -392,8 +390,6 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpSlicemask(v) case OpSqrt: return rewriteValuegeneric_OpSqrt(v) - case OpStaticCall: - return rewriteValuegeneric_OpStaticCall(v) case OpStaticLECall: return rewriteValuegeneric_OpStaticLECall(v) case OpStore: @@ -4089,6 +4085,26 @@ func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool { v.AuxInt = float32ToAuxInt(float32(c)) return true } + // match: (Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x))) + // cond: sqrt0.Uses==1 + // result: (Sqrt32 x) + for { + sqrt0 := v_0 + if sqrt0.Op != OpSqrt { + break + } + sqrt0_0 := sqrt0.Args[0] + if sqrt0_0.Op != OpCvt32Fto64F { + break + } + x := sqrt0_0.Args[0] + if !(sqrt0.Uses == 1) { + break + } + v.reset(OpSqrt32) + v.AddArg(x) + return true + } return false } func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool { @@ -8136,32 +8152,32 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { v.AuxInt = boolToAuxInt(true) return true } - // match: (EqPtr (Addr {a} _) (Addr {b} _)) - // result: (ConstBool [a == b]) + // match: (EqPtr (Addr {x} _) (Addr {y} _)) + // result: (ConstBool [x == y]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddr { continue } - a := auxToSym(v_0.Aux) + x := auxToSym(v_0.Aux) if v_1.Op != OpAddr { continue } - b := auxToSym(v_1.Aux) + y := auxToSym(v_1.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a == b) + v.AuxInt = boolToAuxInt(x == y) return true } break } - // match: (EqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) - // result: (ConstBool [a == b && o == 0]) + // match: (EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) + // result: (ConstBool [x == y && o == 0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddr { continue } - a := auxToSym(v_0.Aux) + x := auxToSym(v_0.Aux) if v_1.Op != OpOffPtr { continue } @@ -8170,15 +8186,15 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { if v_1_0.Op != OpAddr { continue } - b := auxToSym(v_1_0.Aux) + y := auxToSym(v_1_0.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a == b && o == 0) + v.AuxInt = boolToAuxInt(x == y && o == 0) return true } break } - // match: (EqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) - // result: (ConstBool [a == b && o1 == o2]) + // match: (EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) + // result: (ConstBool [x == y && o1 == o2]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { @@ -8189,7 +8205,7 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { if v_0_0.Op != OpAddr { continue } - a := auxToSym(v_0_0.Aux) + x := auxToSym(v_0_0.Aux) if v_1.Op != OpOffPtr { continue } @@ -8198,39 +8214,39 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { if v_1_0.Op != OpAddr { continue } - b := auxToSym(v_1_0.Aux) + y := auxToSym(v_1_0.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a == b && o1 == o2) + v.AuxInt = boolToAuxInt(x == y && o1 == o2) return true } break } - // match: (EqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) - // result: (ConstBool [a == b]) + // match: (EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) + // result: (ConstBool [x == y]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } - a := auxToSym(v_0.Aux) + x := auxToSym(v_0.Aux) if v_1.Op != OpLocalAddr { continue } - b 
:= auxToSym(v_1.Aux) + y := auxToSym(v_1.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a == b) + v.AuxInt = boolToAuxInt(x == y) return true } break } - // match: (EqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) - // result: (ConstBool [a == b && o == 0]) + // match: (EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) + // result: (ConstBool [x == y && o == 0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } - a := auxToSym(v_0.Aux) + x := auxToSym(v_0.Aux) if v_1.Op != OpOffPtr { continue } @@ -8239,15 +8255,15 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { if v_1_0.Op != OpLocalAddr { continue } - b := auxToSym(v_1_0.Aux) + y := auxToSym(v_1_0.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a == b && o == 0) + v.AuxInt = boolToAuxInt(x == y && o == 0) return true } break } - // match: (EqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) - // result: (ConstBool [a == b && o1 == o2]) + // match: (EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) + // result: (ConstBool [x == y && o1 == o2]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { @@ -8258,7 +8274,7 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { if v_0_0.Op != OpLocalAddr { continue } - a := auxToSym(v_0_0.Aux) + x := auxToSym(v_0_0.Aux) if v_1.Op != OpOffPtr { continue } @@ -8267,9 +8283,9 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { if v_1_0.Op != OpLocalAddr { continue } - b := auxToSym(v_1_0.Aux) + y := auxToSym(v_1_0.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a == b && o1 == o2) + v.AuxInt = boolToAuxInt(x == y && o1 == o2) return true } break @@ -8516,74 +8532,28 @@ func rewriteValuegeneric_OpEqSlice(v *Value) bool { func rewriteValuegeneric_OpIMake(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IMake typ (StructMake1 val)) - // result: (IMake typ val) + // match: (IMake _typ (StructMake1 val)) + // result: (IMake _typ val) for { - typ := v_0 + _typ := v_0 if v_1.Op != OpStructMake1 { break } val := v_1.Args[0] v.reset(OpIMake) - v.AddArg2(typ, val) + v.AddArg2(_typ, val) return true } - // match: (IMake typ (ArrayMake1 val)) - // result: (IMake typ val) + // match: (IMake _typ (ArrayMake1 val)) + // result: (IMake _typ val) for { - typ := v_0 + _typ := v_0 if v_1.Op != OpArrayMake1 { break } val := v_1.Args[0] v.reset(OpIMake) - v.AddArg2(typ, val) - return true - } - return false -} -func rewriteValuegeneric_OpInterCall(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) - // cond: devirt(v, auxCall, itab, off) != nil - // result: (StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem) - for { - argsize := auxIntToInt32(v.AuxInt) - auxCall := auxToCall(v.Aux) - if v_0.Op != OpLoad { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpOffPtr { - break - } - off := auxIntToInt64(v_0_0.AuxInt) - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpITab { - break - } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpIMake { - break - } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAddr { - break - } - itab := auxToSym(v_0_0_0_0_0.Aux) - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpSB { - break - } - mem := v_1 - if !(devirt(v, auxCall, itab, off) != nil) { - break - } - v.reset(OpStaticCall) - v.AuxInt = int32ToAuxInt(int32(argsize)) - v.Aux = callToAux(devirt(v, 
auxCall, itab, off)) - v.AddArg(mem) + v.AddArg2(_typ, val) return true } return false @@ -15740,32 +15710,32 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { v.AuxInt = boolToAuxInt(false) return true } - // match: (NeqPtr (Addr {a} _) (Addr {b} _)) - // result: (ConstBool [a != b]) + // match: (NeqPtr (Addr {x} _) (Addr {y} _)) + // result: (ConstBool [x != y]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddr { continue } - a := auxToSym(v_0.Aux) + x := auxToSym(v_0.Aux) if v_1.Op != OpAddr { continue } - b := auxToSym(v_1.Aux) + y := auxToSym(v_1.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a != b) + v.AuxInt = boolToAuxInt(x != y) return true } break } - // match: (NeqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) - // result: (ConstBool [a != b || o != 0]) + // match: (NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) + // result: (ConstBool [x != y || o != 0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAddr { continue } - a := auxToSym(v_0.Aux) + x := auxToSym(v_0.Aux) if v_1.Op != OpOffPtr { continue } @@ -15774,15 +15744,15 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { if v_1_0.Op != OpAddr { continue } - b := auxToSym(v_1_0.Aux) + y := auxToSym(v_1_0.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a != b || o != 0) + v.AuxInt = boolToAuxInt(x != y || o != 0) return true } break } - // match: (NeqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) - // result: (ConstBool [a != b || o1 != o2]) + // match: (NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) + // result: (ConstBool [x != y || o1 != o2]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { @@ -15793,7 +15763,7 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { if v_0_0.Op != OpAddr { continue } - a := auxToSym(v_0_0.Aux) + x := auxToSym(v_0_0.Aux) if v_1.Op != OpOffPtr { continue } @@ -15802,39 +15772,39 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { if v_1_0.Op != OpAddr { continue } - b := auxToSym(v_1_0.Aux) + y := auxToSym(v_1_0.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a != b || o1 != o2) + v.AuxInt = boolToAuxInt(x != y || o1 != o2) return true } break } - // match: (NeqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) - // result: (ConstBool [a != b]) + // match: (NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) + // result: (ConstBool [x != y]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } - a := auxToSym(v_0.Aux) + x := auxToSym(v_0.Aux) if v_1.Op != OpLocalAddr { continue } - b := auxToSym(v_1.Aux) + y := auxToSym(v_1.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a != b) + v.AuxInt = boolToAuxInt(x != y) return true } break } - // match: (NeqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) - // result: (ConstBool [a != b || o != 0]) + // match: (NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) + // result: (ConstBool [x != y || o != 0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpLocalAddr { continue } - a := auxToSym(v_0.Aux) + x := auxToSym(v_0.Aux) if v_1.Op != OpOffPtr { continue } @@ -15843,15 +15813,15 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { if v_1_0.Op != OpLocalAddr { continue } - b := auxToSym(v_1_0.Aux) + y := auxToSym(v_1_0.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a != b || o != 0) + v.AuxInt = boolToAuxInt(x != y || o != 0) return true } break } - // match: (NeqPtr (OffPtr [o1] (LocalAddr {a} _ _)) 
(OffPtr [o2] (LocalAddr {b} _ _))) - // result: (ConstBool [a != b || o1 != o2]) + // match: (NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) + // result: (ConstBool [x != y || o1 != o2]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpOffPtr { @@ -15862,7 +15832,7 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { if v_0_0.Op != OpLocalAddr { continue } - a := auxToSym(v_0_0.Aux) + x := auxToSym(v_0_0.Aux) if v_1.Op != OpOffPtr { continue } @@ -15871,9 +15841,9 @@ func rewriteValuegeneric_OpNeqPtr(v *Value) bool { if v_1_0.Op != OpLocalAddr { continue } - b := auxToSym(v_1_0.Aux) + y := auxToSym(v_1_0.Aux) v.reset(OpConstBool) - v.AuxInt = boolToAuxInt(a != b || o1 != o2) + v.AuxInt = boolToAuxInt(x != y || o1 != o2) return true } break @@ -16113,7 +16083,6 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config fe := b.Func.fe // match: (NilCheck (GetG mem) mem) // result: mem @@ -16128,67 +16097,7 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { v.copyOf(mem) return true } - // match: (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _) - // cond: isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") - // result: (Invalid) - for { - if v_0.Op != OpLoad { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpOffPtr { - break - } - c := auxIntToInt64(v_0_0.AuxInt) - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpSP { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpStaticCall { - break - } - sym := auxToCall(v_0_1.Aux) - if !(isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) { - break - } - v.reset(OpInvalid) - return true - } - // match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _) - // cond: isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") - // result: (Invalid) - for { - if v_0.Op != OpOffPtr { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLoad { - break - } - _ = v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpOffPtr { - break - } - c := auxIntToInt64(v_0_0_0.AuxInt) - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpSP { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpStaticCall { - break - } - sym := auxToCall(v_0_0_1.Aux) - if !(isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) { - break - } - v.reset(OpInvalid) - return true - } - // match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) (SelectN [1] call)) + // match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) _) // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: (Invalid) for { @@ -16196,13 +16105,13 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { break } call := v_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 || v_1.Op != OpSelectN || auxIntToInt64(v_1.AuxInt) != 1 || call != v_1.Args[0] || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), 
v, "removed nil check")) { break } v.reset(OpInvalid) return true } - // match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) (SelectN [1] call)) + // match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _) // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: (Invalid) for { @@ -16214,7 +16123,7 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { break } call := v_0_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 || v_1.Op != OpSelectN || auxIntToInt64(v_1.AuxInt) != 1 || call != v_1.Args[0] || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.reset(OpInvalid) @@ -16623,17 +16532,17 @@ func rewriteValuegeneric_OpNot(v *Value) bool { } func rewriteValuegeneric_OpOffPtr(v *Value) bool { v_0 := v.Args[0] - // match: (OffPtr (OffPtr p [b]) [a]) - // result: (OffPtr p [a+b]) + // match: (OffPtr (OffPtr p [y]) [x]) + // result: (OffPtr p [x+y]) for { - a := auxIntToInt64(v.AuxInt) + x := auxIntToInt64(v.AuxInt) if v_0.Op != OpOffPtr { break } - b := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt64(v_0.AuxInt) p := v_0.Args[0] v.reset(OpOffPtr) - v.AuxInt = int64ToAuxInt(a + b) + v.AuxInt = int64ToAuxInt(x + y) v.AddArg(p) return true } @@ -20769,34 +20678,180 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config - // match: (SelectN [0] (MakeResult a ___)) - // result: a + // match: (SelectN [0] (MakeResult x ___)) + // result: x for { if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpMakeResult || len(v_0.Args) < 1 { break } - a := v_0.Args[0] - v.copyOf(a) + x := v_0.Args[0] + v.copyOf(x) return true } - // match: (SelectN [1] (MakeResult a b ___)) - // result: b + // match: (SelectN [1] (MakeResult x y ___)) + // result: y for { if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpMakeResult || len(v_0.Args) < 2 { break } - b := v_0.Args[1] - v.copyOf(b) + y := v_0.Args[1] + v.copyOf(y) return true } - // match: (SelectN [2] (MakeResult a b c ___)) - // result: c + // match: (SelectN [2] (MakeResult x y z ___)) + // result: z for { if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpMakeResult || len(v_0.Args) < 3 { break } - c := v_0.Args[2] - v.copyOf(c) + z := v_0.Args[2] + v.copyOf(z) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call) + // result: (Move {t.Elem()} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpStore { + break + } + _ = s1.Args[2] + s1_1 := s1.Args[1] + if s1_1.Op != OpConst64 { + break + } + sz := auxIntToInt64(s1_1.AuxInt) + s2 := s1.Args[2] + if s2.Op != OpStore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpStore { + break + } + t := auxToType(s3.Aux) + mem := s3.Args[2] + dst := s3.Args[1] + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && 
isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(t.Elem()) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call) + // result: (Move {t.Elem()} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpStore { + break + } + _ = s1.Args[2] + s1_1 := s1.Args[1] + if s1_1.Op != OpConst32 { + break + } + sz := auxIntToInt32(s1_1.AuxInt) + s2 := s1.Args[2] + if s2.Op != OpStore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpStore { + break + } + t := auxToType(s3.Aux) + mem := s3.Args[2] + dst := s3.Args[1] + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(t.Elem()) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} dst src (Const64 [sz]) mem)) + // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call) + // result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpConst64 { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(dst.Type.Elem()) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} dst src (Const32 [sz]) mem)) + // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call) + // result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpConst32 { + break + } + sz := auxIntToInt32(call_2.AuxInt) + if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(dst.Type.Elem()) + v.AddArg3(dst, src, mem) return true } // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem)) @@ -20857,6 +20912,44 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool { v.AddArg3(dst, src, 
mem) return true } + // match: (SelectN [0] call:(StaticLECall {sym} a x)) + // cond: needRaceCleanup(sym, call) && clobber(call) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticLECall || len(call.Args) != 2 { + break + } + sym := auxToCall(call.Aux) + x := call.Args[1] + if !(needRaceCleanup(sym, call) && clobber(call)) { + break + } + v.copyOf(x) + return true + } + // match: (SelectN [0] call:(StaticLECall {sym} x)) + // cond: needRaceCleanup(sym, call) && clobber(call) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticLECall || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + x := call.Args[0] + if !(needRaceCleanup(sym, call) && clobber(call)) { + break + } + v.copyOf(x) + return true + } return false } func rewriteValuegeneric_OpSignExt16to32(v *Value) bool { @@ -21307,98 +21400,6 @@ func rewriteValuegeneric_OpSqrt(v *Value) bool { } return false } -func rewriteValuegeneric_OpStaticCall(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - config := b.Func.Config - // match: (StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) - // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3) - // result: (Move {t.Elem()} [int64(sz)] dst src mem) - for { - sym := auxToCall(v.Aux) - s1 := v_0 - if s1.Op != OpStore { - break - } - _ = s1.Args[2] - s1_1 := s1.Args[1] - if s1_1.Op != OpConst64 { - break - } - sz := auxIntToInt64(s1_1.AuxInt) - s2 := s1.Args[2] - if s2.Op != OpStore { - break - } - _ = s2.Args[2] - src := s2.Args[1] - s3 := s2.Args[2] - if s3.Op != OpStore { - break - } - t := auxToType(s3.Aux) - mem := s3.Args[2] - dst := s3.Args[1] - if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(int64(sz)) - v.Aux = typeToAux(t.Elem()) - v.AddArg3(dst, src, mem) - return true - } - // match: (StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) - // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3) - // result: (Move {t.Elem()} [int64(sz)] dst src mem) - for { - sym := auxToCall(v.Aux) - s1 := v_0 - if s1.Op != OpStore { - break - } - _ = s1.Args[2] - s1_1 := s1.Args[1] - if s1_1.Op != OpConst32 { - break - } - sz := auxIntToInt32(s1_1.AuxInt) - s2 := s1.Args[2] - if s2.Op != OpStore { - break - } - _ = s2.Args[2] - src := s2.Args[1] - s3 := s2.Args[2] - if s3.Op != OpStore { - break - } - t := auxToType(s3.Aux) - mem := s3.Args[2] - dst := s3.Args[1] - if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(int64(sz)) - v.Aux = typeToAux(t.Elem()) - v.AddArg3(dst, src, mem) - return true - } - // match: (StaticCall {sym} x) - // cond: needRaceCleanup(sym, v) - // result: x - for { - sym := auxToCall(v.Aux) - x := v_0 - if !(needRaceCleanup(sym, v)) { - break - } - v.copyOf(x) - return true - } - return false -} func 
rewriteValuegeneric_OpStaticLECall(v *Value) bool { b := v.Block typ := &b.Func.Config.Types @@ -21442,7 +21443,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config fe := b.Func.fe // match: (Store {t1} p1 (Load p2 mem) mem) // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() @@ -21890,58 +21890,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v.AddArg3(dst, e, mem) return true } - // match: (Store (Load (OffPtr [c] (SP)) mem) x mem) - // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize - // result: mem - for { - if v_0.Op != OpLoad { - break - } - mem := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpOffPtr { - break - } - c := auxIntToInt64(v_0_0.AuxInt) - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpSP { - break - } - x := v_1 - if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { - break - } - v.copyOf(mem) - return true - } - // match: (Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem) - // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize - // result: mem - for { - if v_0.Op != OpOffPtr { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLoad { - break - } - mem := v_0_0.Args[1] - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpOffPtr { - break - } - c := auxIntToInt64(v_0_0_0.AuxInt) - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpSP { - break - } - x := v_1 - if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { - break - } - v.copyOf(mem) - return true - } // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") // result: mem @@ -24660,27 +24608,6 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config - // match: (Zero (Load (OffPtr [c] (SP)) mem) mem) - // cond: mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize - // result: mem - for { - if v_0.Op != OpLoad { - break - } - mem := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpOffPtr { - break - } - c := auxIntToInt64(v_0_0.AuxInt) - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpSP || mem != v_1 || !(mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { - break - } - v.copyOf(mem) - return true - } // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) // cond: isSameCall(call.Aux, "runtime.newobject") // result: mem diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go index 8facb91100c..4e3e5e75e35 100644 --- a/src/cmd/compile/internal/ssa/schedule.go +++ b/src/cmd/compile/internal/ssa/schedule.go @@ -137,6 +137,13 @@ func schedule(f *Func) { case v.Op == OpVarDef: // We want all the vardefs next. score[v.ID] = ScoreVarDef + case v.Op == OpArgIntReg || v.Op == OpArgFloatReg: + // In-register args must be scheduled as early as possible to ensure that the + // context register is not stomped. They should only appear in the entry block. 
+ if b != f.Entry { + f.Fatalf("%s appeared outside of entry block, b=%s", v.Op, b.String()) + } + score[v.ID] = ScorePhi case v.Op == OpArg: // We want all the args as early as possible, for better debugging. score[v.ID] = ScoreArg @@ -145,7 +152,7 @@ func schedule(f *Func) { // reduce register pressure. It also helps make sure // VARDEF ops are scheduled before the corresponding LEA. score[v.ID] = ScoreMemory - case v.Op == OpSelect0 || v.Op == OpSelect1: + case v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN: // Schedule the pseudo-op of reading part of a tuple // immediately after the tuple-generating op, since // this value is already live. This also removes its @@ -270,6 +277,20 @@ func schedule(f *Func) { tuples[v.Args[0].ID] = make([]*Value, 2) } tuples[v.Args[0].ID][1] = v + case v.Op == OpSelectN: + if tuples[v.Args[0].ID] == nil { + tuples[v.Args[0].ID] = make([]*Value, v.Args[0].Type.NumFields()) + } + tuples[v.Args[0].ID][v.AuxInt] = v + case v.Type.IsResults() && tuples[v.ID] != nil: + tup := tuples[v.ID] + for i := len(tup) - 1; i >= 0; i-- { + if tup[i] != nil { + order = append(order, tup[i]) + } + } + delete(tuples, v.ID) + order = append(order, v) case v.Type.IsTuple() && tuples[v.ID] != nil: if tuples[v.ID][1] != nil { order = append(order, tuples[v.ID][1]) diff --git a/src/cmd/compile/internal/ssa/shortcircuit.go b/src/cmd/compile/internal/ssa/shortcircuit.go index 7b4ee2e81c2..29abf3c591f 100644 --- a/src/cmd/compile/internal/ssa/shortcircuit.go +++ b/src/cmd/compile/internal/ssa/shortcircuit.go @@ -138,6 +138,24 @@ func shortcircuitBlock(b *Block) bool { if len(b.Values) != nval+nOtherPhi { return false } + if nOtherPhi > 0 { + // Check for any phi which is the argument of another phi. + // These cases are tricky, as substitutions done by replaceUses + // are no longer trivial to do in any ordering. See issue 45175. + m := make(map[*Value]bool, 1+nOtherPhi) + for _, v := range b.Values { + if v.Op == OpPhi { + m[v] = true + } + } + for v := range m { + for _, a := range v.Args { + if a != v && m[a] { + return false + } + } + } + } // Locate index of first const phi arg. cidx := -1 @@ -266,6 +284,13 @@ func shortcircuitPhiPlan(b *Block, ctl *Value, cidx int, ti int64) func(*Value, // u is the "untaken" branch: the successor we never go to when coming in from p. u := b.Succs[1^ti].b + // In the following CFG matching, ensure that b's preds are entirely distinct from b's succs. + // This is probably a stronger condition than required, but this happens extremely rarely, + // and it makes it easier to avoid getting deceived by pretty ASCII charts. See #44465. + if p0, p1 := b.Preds[0].b, b.Preds[1].b; p0 == t || p1 == t || p0 == u || p1 == u { + return nil + } + // Look for some common CFG structures // in which the outbound paths from b merge, // with no other preds joining them. diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go index 1be20b2cdae..be914c8644d 100644 --- a/src/cmd/compile/internal/ssa/sparsetree.go +++ b/src/cmd/compile/internal/ssa/sparsetree.go @@ -178,6 +178,12 @@ func (t SparseTree) Child(x *Block) *Block { return t[x.ID].child } +// Parent returns the parent of x in the dominator tree, or +// nil if x is the function's entry. +func (t SparseTree) Parent(x *Block) *Block { + return t[x.ID].parent +} + // isAncestorEq reports whether x is an ancestor of or equal to y. 
func (t SparseTree) IsAncestorEq(x, y *Block) bool { if x == y { diff --git a/src/cmd/compile/internal/ssa/sparsetreemap.go b/src/cmd/compile/internal/ssa/sparsetreemap.go deleted file mode 100644 index d26467517e1..00000000000 --- a/src/cmd/compile/internal/ssa/sparsetreemap.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -import "fmt" - -// A SparseTreeMap encodes a subset of nodes within a tree -// used for sparse-ancestor queries. -// -// Combined with a SparseTreeHelper, this supports an Insert -// to add a tree node to the set and a Find operation to locate -// the nearest tree ancestor of a given node such that the -// ancestor is also in the set. -// -// Given a set of blocks {B1, B2, B3} within the dominator tree, established -// by stm.Insert()ing B1, B2, B3, etc, a query at block B -// (performed with stm.Find(stm, B, adjust, helper)) -// will return the member of the set that is the nearest strict -// ancestor of B within the dominator tree, or nil if none exists. -// The expected complexity of this operation is the log of the size -// of the set, given certain assumptions about sparsity (the log complexity -// could be guaranteed with additional data structures whose constant- -// factor overhead has not yet been justified.) -// -// The adjust parameter allows positioning of the insertion -// and lookup points within a block -- one of -// AdjustBefore, AdjustWithin, AdjustAfter, -// where lookups at AdjustWithin can find insertions at -// AdjustBefore in the same block, and lookups at AdjustAfter -// can find insertions at either AdjustBefore or AdjustWithin -// in the same block. (Note that this assumes a gappy numbering -// such that entry number or exit number is separated from its -// nearest neighbor by at least 3). -// -// The Sparse Tree lookup algorithm is described by -// Paul F. Dietz. Maintaining order in a linked list. In -// Proceedings of the Fourteenth Annual ACM Symposium on -// Theory of Computing, pages 122–127, May 1982. -// and by -// Ben Wegbreit. Faster retrieval from context trees. -// Communications of the ACM, 19(9):526–529, September 1976. -type SparseTreeMap RBTint32 - -// A SparseTreeHelper contains indexing and allocation data -// structures common to a collection of SparseTreeMaps, as well -// as exposing some useful control-flow-related data to other -// packages, such as gc. -type SparseTreeHelper struct { - Sdom []SparseTreeNode // indexed by block.ID - Po []*Block // exported data; the blocks, in a post-order - Dom []*Block // exported data; the dominator of this block. - Ponums []int32 // exported data; Po[Ponums[b.ID]] == b; the index of b in Po -} - -// NewSparseTreeHelper returns a SparseTreeHelper for use -// in the gc package, for example in phi-function placement.
-func NewSparseTreeHelper(f *Func) *SparseTreeHelper { - dom := f.Idom() - ponums := make([]int32, f.NumBlocks()) - po := postorderWithNumbering(f, ponums) - return makeSparseTreeHelper(newSparseTree(f, dom), dom, po, ponums) -} - -func (h *SparseTreeHelper) NewTree() *SparseTreeMap { - return &SparseTreeMap{} -} - -func makeSparseTreeHelper(sdom SparseTree, dom, po []*Block, ponums []int32) *SparseTreeHelper { - helper := &SparseTreeHelper{Sdom: []SparseTreeNode(sdom), - Dom: dom, - Po: po, - Ponums: ponums, - } - return helper -} - -// A sparseTreeMapEntry contains the data stored in a binary search -// data structure indexed by (dominator tree walk) entry and exit numbers. -// Each entry is added twice, once keyed by entry-1/entry/entry+1 and -// once keyed by exit+1/exit/exit-1. -// -// Within a sparse tree, the two entries added bracket all their descendant -// entries within the tree; the first insertion is keyed by entry number, -// which comes before all the entry and exit numbers of descendants, and -// the second insertion is keyed by exit number, which comes after all the -// entry and exit numbers of the descendants. -type sparseTreeMapEntry struct { - index *SparseTreeNode // references the entry and exit numbers for a block in the sparse tree - block *Block // TODO: store this in a separate index. - data interface{} - sparseParent *sparseTreeMapEntry // references the nearest ancestor of this block in the sparse tree. - adjust int32 // at what adjustment was this node entered into the sparse tree? The same block may be entered more than once, but at different adjustments. -} - -// Insert creates a definition within b with data x. -// adjust indicates where in the block should be inserted: -// AdjustBefore means defined at a phi function (visible Within or After in the same block) -// AdjustWithin means defined within the block (visible After in the same block) -// AdjustAfter means after the block (visible within child blocks) -func (m *SparseTreeMap) Insert(b *Block, adjust int32, x interface{}, helper *SparseTreeHelper) { - rbtree := (*RBTint32)(m) - blockIndex := &helper.Sdom[b.ID] - if blockIndex.entry == 0 { - // assert unreachable - return - } - // sp will be the sparse parent in this sparse tree (nearest ancestor in the larger tree that is also in this sparse tree) - sp := m.findEntry(b, adjust, helper) - entry := &sparseTreeMapEntry{index: blockIndex, block: b, data: x, sparseParent: sp, adjust: adjust} - - right := blockIndex.exit - adjust - _ = rbtree.Insert(right, entry) - - left := blockIndex.entry + adjust - _ = rbtree.Insert(left, entry) - - // This newly inserted block may now be the sparse parent of some existing nodes (the new sparse children of this block) - // Iterate over nodes bracketed by this new node to correct their parent, but not over the proper sparse descendants of those nodes. - _, d := rbtree.Lub(left) // Lub (not EQ) of left is either right or a sparse child - for tme := d.(*sparseTreeMapEntry); tme != entry; tme = d.(*sparseTreeMapEntry) { - tme.sparseParent = entry - // all descendants of tme are unchanged; - // next sparse sibling (or right-bracketing sparse parent == entry) is first node after tme.index.exit - tme.adjust - _, d = rbtree.Lub(tme.index.exit - tme.adjust) - } -} - -// Find returns the definition visible from block b, or nil if none can be found. -// Adjust indicates where the block should be searched. -// AdjustBefore searches before the phi functions of b. -// AdjustWithin searches starting at the phi functions of b. 
-// AdjustAfter searches starting at the exit from the block, including normal within-block definitions. -// -// Note that Finds are properly nested with Inserts: -// m.Insert(b, a) followed by m.Find(b, a) will not return the result of the insert, -// but m.Insert(b, AdjustBefore) followed by m.Find(b, AdjustWithin) will. -// -// Another way to think of this is that Find searches for inputs, Insert defines outputs. -func (m *SparseTreeMap) Find(b *Block, adjust int32, helper *SparseTreeHelper) interface{} { - v := m.findEntry(b, adjust, helper) - if v == nil { - return nil - } - return v.data -} - -func (m *SparseTreeMap) findEntry(b *Block, adjust int32, helper *SparseTreeHelper) *sparseTreeMapEntry { - rbtree := (*RBTint32)(m) - if rbtree == nil { - return nil - } - blockIndex := &helper.Sdom[b.ID] - - // The Glb (not EQ) of this probe is either the entry-indexed end of a sparse parent - // or the exit-indexed end of a sparse sibling - _, v := rbtree.Glb(blockIndex.entry + adjust) - - if v == nil { - return nil - } - - otherEntry := v.(*sparseTreeMapEntry) - if otherEntry.index.exit >= blockIndex.exit { // otherEntry exit after blockIndex exit; therefore, brackets - return otherEntry - } - // otherEntry is a sparse Sibling, and shares the same sparse parent (nearest ancestor within larger tree) - sp := otherEntry.sparseParent - if sp != nil { - if sp.index.exit < blockIndex.exit { // no ancestor found - return nil - } - return sp - } - return nil -} - -func (m *SparseTreeMap) String() string { - tree := (*RBTint32)(m) - return tree.String() -} - -func (e *sparseTreeMapEntry) String() string { - if e == nil { - return "nil" - } - return fmt.Sprintf("(index=%v, block=%v, data=%v)->%v", e.index, e.block, e.data, e.sparseParent) -} diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index 68a6f08a2a9..d41f3996af7 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -112,7 +112,7 @@ func (s *stackAllocState) init(f *Func, spillLive [][]ID) { for _, v := range b.Values { s.values[v.ID].typ = v.Type s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable() && !v.OnWasmStack - s.values[v.ID].isArg = v.Op == OpArg + s.values[v.ID].isArg = hasAnyArgOp(v) if f.pass.debug > stackDebug && s.values[v.ID].needSlot { fmt.Printf("%s needs a stack slot\n", v) } @@ -141,27 +141,72 @@ func (s *stackAllocState) stackalloc() { s.names = make([]LocalSlot, n) } names := s.names + empty := LocalSlot{} for _, name := range f.Names { // Note: not "range f.NamedValues" above, because // that would be nondeterministic. - for _, v := range f.NamedValues[name] { - names[v.ID] = name + for _, v := range f.NamedValues[*name] { + if v.Op == OpArgIntReg || v.Op == OpArgFloatReg { + aux := v.Aux.(*AuxNameOffset) + // Never let an arg be bound to a differently named thing. 
+ if name.N != aux.Name || name.Off != aux.Offset { + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc register arg %s skipping name %s\n", v, name) + } + continue + } + } else if name.N.Class == ir.PPARAM && v.Op != OpArg { + // PPARAM's only bind to OpArg + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc PPARAM name %s skipping non-Arg %s\n", name, v) + } + continue + } + + if names[v.ID] == empty { + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc value %s to name %s\n", v, *name) + } + names[v.ID] = *name + } } } // Allocate args to their assigned locations. for _, v := range f.Entry.Values { - if v.Op != OpArg { + if !hasAnyArgOp(v) { continue } if v.Aux == nil { f.Fatalf("%s has nil Aux\n", v.LongString()) } - loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt} - if f.pass.debug > stackDebug { - fmt.Printf("stackalloc %s to %s\n", v, loc) + if v.Op == OpArg { + loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt} + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc OpArg %s to %s\n", v, loc) + } + f.setHome(v, loc) + continue } - f.setHome(v, loc) + // You might think this below would be the right idea, but you would be wrong. + // It almost works; as of 105a6e9518 - 2021-04-23, + // GOSSAHASH=11011011001011111 == cmd/compile/internal/noder.(*noder).embedded + // is compiled incorrectly. I believe the cause is one of those SSA-to-registers + // puzzles that the register allocator untangles; in the event that a register + // parameter does not end up bound to a name, "fixing" it is a bad idea. + // + //if f.DebugTest { + // if v.Op == OpArgIntReg || v.Op == OpArgFloatReg { + // aux := v.Aux.(*AuxNameOffset) + // loc := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset} + // if f.pass.debug > stackDebug { + // fmt.Printf("stackalloc Op%s %s to %s\n", v.Op, v, loc) + // } + // names[v.ID] = loc + // continue + // } + //} + } // For each type, we keep track of all the stack slots we @@ -198,7 +243,7 @@ func (s *stackAllocState) stackalloc() { s.nNotNeed++ continue } - if v.Op == OpArg { + if hasAnyArgOp(v) { s.nArgSlot++ continue // already picked } @@ -385,7 +430,7 @@ func (s *stackAllocState) buildInterferenceGraph() { for _, id := range live.contents() { // Note: args can have different types and still interfere // (with each other or with other values). See issue 23522. - if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq || v.Op == OpArg || s.values[id].isArg { + if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq || hasAnyArgOp(v) || s.values[id].isArg { s.interfere[v.ID] = append(s.interfere[v.ID], id) s.interfere[id] = append(s.interfere[id], v.ID) } @@ -396,13 +441,15 @@ func (s *stackAllocState) buildInterferenceGraph() { live.add(a.ID) } } - if v.Op == OpArg && s.values[v.ID].needSlot { + if hasAnyArgOp(v) && s.values[v.ID].needSlot { // OpArg is an input argument which is pre-spilled. // We add back v.ID here because we want this value // to appear live even before this point. Being live // all the way to the start of the entry block prevents other // values from being allocated to the same slot and clobbering // the input value before we have a chance to load it. + + // TODO(register args) this is apparently not wrong for register args -- is it necessary? 
live.add(v.ID) } } @@ -419,3 +466,7 @@ func (s *stackAllocState) buildInterferenceGraph() { } } } + +func hasAnyArgOp(v *Value) bool { + return v.Op == OpArg || v.Op == OpArgIntReg || v.Op == OpArgFloatReg +} diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go index f5ff3a59272..a510d0b3d06 100644 --- a/src/cmd/compile/internal/ssa/stmtlines_test.go +++ b/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -117,6 +117,7 @@ func TestStmtLines(t *testing.T) { } else if len(nonStmtLines)*100 > 2*len(lines) { // expect 98% elsewhere. t.Errorf("Saw too many (not amd64, > 2%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", len(lines), len(nonStmtLines)) } + t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines)) if testing.Verbose() { sort.Slice(nonStmtLines, func(i, j int) bool { if nonStmtLines[i].File != nonStmtLines[j].File { diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go index 5dfc4536495..214bf628bd8 100644 --- a/src/cmd/compile/internal/ssa/tighten.go +++ b/src/cmd/compile/internal/ssa/tighten.go @@ -18,10 +18,11 @@ func tighten(f *Func) { continue } switch v.Op { - case OpPhi, OpArg, OpSelect0, OpSelect1: + case OpPhi, OpArg, OpArgIntReg, OpArgFloatReg, OpSelect0, OpSelect1, OpSelectN: // Phis need to stay in their block. // Arg must stay in the entry block. // Tuple selectors must stay with the tuple generator. + // SelectN is typically, ultimately, a register. continue } if v.MemoryArg() != nil { diff --git a/src/cmd/compile/internal/ssa/tuple.go b/src/cmd/compile/internal/ssa/tuple.go index 38deabf83d2..289df40431a 100644 --- a/src/cmd/compile/internal/ssa/tuple.go +++ b/src/cmd/compile/internal/ssa/tuple.go @@ -4,8 +4,8 @@ package ssa -// tightenTupleSelectors ensures that tuple selectors (Select0 and -// Select1 ops) are in the same block as their tuple generator. The +// tightenTupleSelectors ensures that tuple selectors (Select0, Select1, +// and SelectN ops) are in the same block as their tuple generator. The // function also ensures that there are no duplicate tuple selectors. // These properties are expected by the scheduler but may not have // been maintained by the optimization pipeline up to this point. @@ -13,28 +13,40 @@ package ssa // See issues 16741 and 39472. func tightenTupleSelectors(f *Func) { selectors := make(map[struct { - id ID - op Op + id ID + which int }]*Value) for _, b := range f.Blocks { for _, selector := range b.Values { - if selector.Op != OpSelect0 && selector.Op != OpSelect1 { + // Key fields for de-duplication + var tuple *Value + idx := 0 + switch selector.Op { + default: continue - } - - // Get the tuple generator to use as a key for de-duplication. - tuple := selector.Args[0] - if !tuple.Type.IsTuple() { - f.Fatalf("arg of tuple selector %s is not a tuple: %s", selector.String(), tuple.LongString()) + case OpSelect1: + idx = 1 + fallthrough + case OpSelect0: + tuple = selector.Args[0] + if !tuple.Type.IsTuple() { + f.Fatalf("arg of tuple selector %s is not a tuple: %s", selector.String(), tuple.LongString()) + } + case OpSelectN: + tuple = selector.Args[0] + idx = int(selector.AuxInt) + if !tuple.Type.IsResults() { + f.Fatalf("arg of result selector %s is not a results: %s", selector.String(), tuple.LongString()) + } } // If there is a pre-existing selector in the target block then // use that. 
Do this even if the selector is already in the // target block to avoid duplicate tuple selectors. key := struct { - id ID - op Op - }{tuple.ID, selector.Op} + id ID + which int + }{tuple.ID, idx} if t := selectors[key]; t != nil { if selector != t { selector.copyOf(t) diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index d000b7cce01..630e4814b99 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -78,7 +78,7 @@ func (v *Value) String() string { } func (v *Value) AuxInt8() int8 { - if opcodeTable[v.Op].auxType != auxInt8 { + if opcodeTable[v.Op].auxType != auxInt8 && opcodeTable[v.Op].auxType != auxNameOffsetInt8 { v.Fatalf("op %s doesn't have an int8 aux field", v.Op) } return int8(v.AuxInt) @@ -139,6 +139,9 @@ func (v *Value) AuxArm64BitField() arm64BitField { // long form print. v# = opcode [aux] args [: reg] (names) func (v *Value) LongString() string { + if v == nil { + return "" + } s := fmt.Sprintf("v%d = %s", v.ID, v.Op) s += " <" + v.Type.String() + ">" s += v.auxString() @@ -198,12 +201,12 @@ func (v *Value) auxString() string { if v.Aux != nil { return fmt.Sprintf(" {%v}", v.Aux) } - case auxSymOff, auxCallOff, auxTypSize: + case auxSymOff, auxCallOff, auxTypSize, auxNameOffsetInt8: s := "" if v.Aux != nil { s = fmt.Sprintf(" {%v}", v.Aux) } - if v.AuxInt != 0 { + if v.AuxInt != 0 || opcodeTable[v.Op].auxType == auxNameOffsetInt8 { s += fmt.Sprintf(" [%v]", v.AuxInt) } return s @@ -345,6 +348,35 @@ func (v *Value) reset(op Op) { v.Aux = nil } +// invalidateRecursively marks a value as invalid (unused) +// and after decrementing reference counts on its Args, +// also recursively invalidates any of those whose use +// count goes to zero. +// +// BEWARE of doing this *before* you've applied intended +// updates to SSA. +func (v *Value) invalidateRecursively() { + if v.InCache { + v.Block.Func.unCache(v) + } + v.Op = OpInvalid + + for _, a := range v.Args { + a.Uses-- + if a.Uses == 0 { + a.invalidateRecursively() + } + } + + v.argstorage[0] = nil + v.argstorage[1] = nil + v.argstorage[2] = nil + v.Args = v.argstorage[:0] + + v.AuxInt = 0 + v.Aux = nil +} + // copyOf is called from rewrite rules. // It modifies v to be (Copy a). //go:noinline @@ -411,6 +443,23 @@ func (v *Value) isGenericIntConst() bool { return v != nil && (v.Op == OpConst64 || v.Op == OpConst32 || v.Op == OpConst16 || v.Op == OpConst8) } +// ResultReg returns the result register assigned to v, in cmd/internal/obj/$ARCH numbering. +// It is similar to Reg and Reg0, except that it is usable interchangeably for all Value Ops. +// If you know v.Op, using Reg or Reg0 (as appropriate) will be more efficient. +func (v *Value) ResultReg() int16 { + reg := v.Block.Func.RegAlloc[v.ID] + if reg == nil { + v.Fatalf("nil reg for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + if pair, ok := reg.(LocPair); ok { + reg = pair[0] + } + if reg == nil { + v.Fatalf("nil reg0 for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + return reg.(*Register).objNum +} + // Reg returns the register assigned to v, in cmd/internal/obj/$ARCH numbering. func (v *Value) Reg() int16 { reg := v.Block.Func.RegAlloc[v.ID] @@ -482,9 +531,9 @@ func (v *Value) removeable() bool { return false } if v.Type.IsMemory() { - // All memory ops aren't needed here, but we do need + // We don't need to preserve all memory ops, but we do need // to keep calls at least (because they might have - // syncronization operations we can't see). 
+ // synchronization operations we can't see). return false } if v.Op.HasSideEffects() { @@ -500,9 +549,13 @@ func (*Value) CanBeAnSSAAux() {} // AutoVar returns a *Name and int64 representing the auto variable and offset within it // where v should be spilled. func AutoVar(v *Value) (*ir.Name, int64) { - loc := v.Block.Func.RegAlloc[v.ID].(LocalSlot) - if v.Type.Size() > loc.Type.Size() { - v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) + if loc, ok := v.Block.Func.RegAlloc[v.ID].(LocalSlot); ok { + if v.Type.Size() > loc.Type.Size() { + v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) + } + return loc.N, loc.Off } - return loc.N, loc.Off + // Assume it is a register, return its spill slot, which needs to be live + nameOff := v.Aux.(*AuxNameOffset) + return nameOff.Name, nameOff.Offset } diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index 4378f2d6276..419d91d0d36 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -38,9 +38,11 @@ func needwb(v *Value, zeroes map[ID]ZeroRegion) bool { if IsStackAddr(v.Args[0]) { return false // write on stack doesn't need write barrier } - if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) && IsNewObject(v.Args[0], v.MemoryArg()) { - // Copying data from readonly memory into a fresh object doesn't need a write barrier. - return false + if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) { + if mem, ok := IsNewObject(v.Args[0]); ok && mem == v.MemoryArg() { + // Copying data from readonly memory into a fresh object doesn't need a write barrier. + return false + } } if v.Op == OpStore && IsGlobalAddr(v.Args[1]) { // Storing pointers to non-heap locations into zeroed memory doesn't need a write barrier. @@ -389,11 +391,7 @@ func (f *Func) computeZeroMap() map[ID]ZeroRegion { // Find new objects. for _, b := range f.Blocks { for _, v := range b.Values { - if v.Op != OpLoad { - continue - } - mem := v.MemoryArg() - if IsNewObject(v, mem) { + if mem, ok := IsNewObject(v); ok { nptr := v.Type.Elem().Size() / ptrSize if nptr > 64 { nptr = 64 @@ -483,38 +481,56 @@ func (f *Func) computeZeroMap() map[ID]ZeroRegion { func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Value) *Value { config := b.Func.Config + var wbargs []*Value + // TODO (register args) this is a bit of a hack. 
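// Aside (illustrative sketch, not part of this CL): the hazard the BEWARE
// note on invalidateRecursively warns about. Rewrite uses first, then
// invalidate; invalidating v while some value w still lists v among its Args
// would recursively free values w still needs. The helper below is
// hypothetical (and O(blocks*values)); it only illustrates the safe ordering.
func replaceAllUsesAndKill(f *Func, v, repl *Value) {
	for _, b := range f.Blocks {
		for _, w := range b.Values {
			for i, a := range w.Args {
				if a == v {
					w.SetArg(i, repl) // rewrite the user first...
				}
			}
		}
	}
	v.invalidateRecursively() // ...then release v and any now-dead args
}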
+ inRegs := b.Func.ABIDefault == b.Func.ABI1 && len(config.intParamRegs) >= 3 + // put arguments on stack off := config.ctxt.FixedFrameSize() - var ACArgs []Param + var argTypes []*types.Type if typ != nil { // for typedmemmove taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb) + argTypes = append(argTypes, b.Func.Config.Types.Uintptr) off = round(off, taddr.Type.Alignment()) - arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp) - mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem) - ACArgs = append(ACArgs, Param{Type: b.Func.Config.Types.Uintptr, Offset: int32(off)}) + if inRegs { + wbargs = append(wbargs, taddr) + } else { + arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp) + mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem) + } off += taddr.Type.Size() } + argTypes = append(argTypes, ptr.Type) off = round(off, ptr.Type.Alignment()) - arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp) - mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem) - ACArgs = append(ACArgs, Param{Type: ptr.Type, Offset: int32(off)}) + if inRegs { + wbargs = append(wbargs, ptr) + } else { + arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp) + mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem) + } off += ptr.Type.Size() if val != nil { + argTypes = append(argTypes, val.Type) off = round(off, val.Type.Alignment()) - arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp) - mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem) - ACArgs = append(ACArgs, Param{Type: val.Type, Offset: int32(off)}) + if inRegs { + wbargs = append(wbargs, val) + } else { + arg := b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp) + mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem) + } off += val.Type.Size() } off = round(off, config.PtrSize) + wbargs = append(wbargs, mem) // issue call - mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, StaticAuxCall(fn, ACArgs, nil), mem) - mem.AuxInt = off - config.ctxt.FixedFrameSize() - return mem + call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(nil, argTypes, nil))) + call.AddArgs(wbargs...) + call.AuxInt = off - config.ctxt.FixedFrameSize() + return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call) } // round to a multiple of r, r is a power of 2 @@ -560,31 +576,60 @@ func IsReadOnlyGlobalAddr(v *Value) bool { return false } -// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object at memory state mem. -func IsNewObject(v *Value, mem *Value) bool { - if v.Op != OpLoad { - return false +// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object, +// if so, also returns the memory state mem at which v is zero. 
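// Aside (illustrative sketch, not part of this CL): with the two-result
// signature introduced just below, callers compare the returned memory state
// against the memory state they care about, as needwb does above. The helper
// name is hypothetical.
func isFreshlyZeroedAt(dst, mem *Value) bool {
	zeroedAt, ok := IsNewObject(dst)
	return ok && zeroedAt == mem
}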
+func IsNewObject(v *Value) (mem *Value, ok bool) { + f := v.Block.Func + c := f.Config + if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 { + if v.Op != OpSelectN || v.AuxInt != 0 { + return nil, false + } + // Find the memory + for _, w := range v.Block.Values { + if w.Op == OpSelectN && w.AuxInt == 1 && w.Args[0] == v.Args[0] { + mem = w + break + } + } + if mem == nil { + return nil, false + } + } else { + if v.Op != OpLoad { + return nil, false + } + mem = v.MemoryArg() + if mem.Op != OpSelectN { + return nil, false + } + if mem.Type != types.TypeMem { + return nil, false + } // assume it is the right selection if true } - if v.MemoryArg() != mem { - return false + call := mem.Args[0] + if call.Op != OpStaticCall { + return nil, false } - if mem.Op != OpStaticCall { - return false + if !isSameCall(call.Aux, "runtime.newobject") { + return nil, false } - if !isSameCall(mem.Aux, "runtime.newobject") { - return false + if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 { + if v.Args[0] == call { + return mem, true + } + return nil, false } if v.Args[0].Op != OpOffPtr { - return false + return nil, false } if v.Args[0].Args[0].Op != OpSP { - return false + return nil, false } - c := v.Block.Func.Config if v.Args[0].AuxInt != c.ctxt.FixedFrameSize()+c.RegSize { // offset of return value - return false + return nil, false } - return true + return mem, true } // IsSanitizerSafeAddr reports whether v is known to be an address diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index 7180b3816ce..e460adaf95d 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -6,13 +6,13 @@ package ssagen import ( "fmt" + "internal/buildcfg" "io/ioutil" "log" "os" "strings" "cmd/compile/internal/base" - "cmd/compile/internal/escape" "cmd/compile/internal/ir" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" @@ -21,46 +21,40 @@ import ( "cmd/internal/objabi" ) -// useNewABIWrapGen returns TRUE if the compiler should generate an -// ABI wrapper for the function 'f'. -func useABIWrapGen(f *ir.Func) bool { - if !base.Flag.ABIWrap { - return false - } +// SymABIs records information provided by the assembler about symbol +// definition ABIs and reference ABIs. +type SymABIs struct { + defs map[string]obj.ABI + refs map[string]obj.ABISet - // Support limit option for bisecting. - if base.Flag.ABIWrapLimit == 1 { - return false - } - if base.Flag.ABIWrapLimit < 1 { - return true - } - base.Flag.ABIWrapLimit-- - if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 { - fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n", - f.LSym.Name) - } - - return true + localPrefix string } -// symabiDefs and symabiRefs record the defined and referenced ABIs of -// symbols required by non-Go code. These are keyed by link symbol -// name, where the local package prefix is always `"".` -var symabiDefs, symabiRefs map[string]obj.ABI - -func CgoSymABIs() { - // The linker expects an ABI0 wrapper for all cgo-exported - // functions. - for _, prag := range typecheck.Target.CgoPragmas { - switch prag[0] { - case "cgo_export_static", "cgo_export_dynamic": - if symabiRefs == nil { - symabiRefs = make(map[string]obj.ABI) - } - symabiRefs[prag[1]] = obj.ABI0 - } +func NewSymABIs(myimportpath string) *SymABIs { + var localPrefix string + if myimportpath != "" { + localPrefix = objabi.PathToPrefix(myimportpath) + "." 
} + + return &SymABIs{ + defs: make(map[string]obj.ABI), + refs: make(map[string]obj.ABISet), + localPrefix: localPrefix, + } +} + +// canonicalize returns the canonical name used for a linker symbol in +// s's maps. Symbols in this package may be written either as "".X or +// with the package's import path already in the symbol. This rewrites +// both to `"".`, which matches compiler-generated linker symbol names. +func (s *SymABIs) canonicalize(linksym string) string { + // If the symbol is already prefixed with localPrefix, + // rewrite it to start with "" so it matches the + // compiler's internal symbol names. + if s.localPrefix != "" && strings.HasPrefix(linksym, s.localPrefix) { + return `"".` + linksym[len(s.localPrefix):] + } + return linksym } // ReadSymABIs reads a symabis file that specifies definitions and @@ -72,23 +66,12 @@ func CgoSymABIs() { // symbol using an ABI. For both "def" and "ref", the second field is // the symbol name and the third field is the ABI name, as one of the // named cmd/internal/obj.ABI constants. -func ReadSymABIs(file, myimportpath string) { +func (s *SymABIs) ReadSymABIs(file string) { data, err := ioutil.ReadFile(file) if err != nil { log.Fatalf("-symabis: %v", err) } - symabiDefs = make(map[string]obj.ABI) - symabiRefs = make(map[string]obj.ABI) - - localPrefix := "" - if myimportpath != "" { - // Symbols in this package may be written either as - // "".X or with the package's import path already in - // the symbol. - localPrefix = objabi.PathToPrefix(myimportpath) + "." - } - for lineNum, line := range strings.Split(string(data), "\n") { lineNum++ // 1-based line = strings.TrimSpace(line) @@ -109,19 +92,13 @@ func ReadSymABIs(file, myimportpath string) { log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr) } - // If the symbol is already prefixed with - // myimportpath, rewrite it to start with "" - // so it matches the compiler's internal - // symbol names. - if localPrefix != "" && strings.HasPrefix(sym, localPrefix) { - sym = `"".` + sym[len(localPrefix):] - } + sym = s.canonicalize(sym) // Record for later. if parts[0] == "def" { - symabiDefs[sym] = abi + s.defs[sym] = abi } else { - symabiRefs[sym] = abi + s.refs[sym] |= obj.ABISetOf(abi) } default: log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0]) @@ -129,6 +106,124 @@ func ReadSymABIs(file, myimportpath string) { } } +// GenABIWrappers applies ABI information to Funcs and generates ABI +// wrapper functions where necessary. +func (s *SymABIs) GenABIWrappers() { + // For cgo exported symbols, we tell the linker to export the + // definition ABI to C. That also means that we don't want to + // create ABI wrappers even if there's a linkname. + // + // TODO(austin): Maybe we want to create the ABI wrappers, but + // ensure the linker exports the right ABI definition under + // the unmangled name? + cgoExports := make(map[string][]*[]string) + for i, prag := range typecheck.Target.CgoPragmas { + switch prag[0] { + case "cgo_export_static", "cgo_export_dynamic": + symName := s.canonicalize(prag[1]) + pprag := &typecheck.Target.CgoPragmas[i] + cgoExports[symName] = append(cgoExports[symName], pprag) + } + } + + // Apply ABI defs and refs to Funcs and generate wrappers. + // + // This may generate new decls for the wrappers, but we + // specifically *don't* want to visit those, lest we create + // wrappers for wrappers. 
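// Aside (worked example, not part of this CL): what canonicalize does for a
// package whose import path is "example.com/pkg", so localPrefix is
// "example.com/pkg.". The package name and symbols are hypothetical.
//
//	s := NewSymABIs("example.com/pkg")
//	s.canonicalize("example.com/pkg.Foo") // `"".Foo`  (local symbol, prefix stripped)
//	s.canonicalize(`"".Foo`)              // `"".Foo`  (already canonical)
//	s.canonicalize("runtime.morestack")   // unchanged (different package)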
+ for _, fn := range typecheck.Target.Decls { + if fn.Op() != ir.ODCLFUNC { + continue + } + fn := fn.(*ir.Func) + nam := fn.Nname + if ir.IsBlank(nam) { + continue + } + sym := nam.Sym() + var symName string + if sym.Linkname != "" { + symName = s.canonicalize(sym.Linkname) + } else { + // These names will already be canonical. + symName = sym.Pkg.Prefix + "." + sym.Name + } + + // Apply definitions. + defABI, hasDefABI := s.defs[symName] + if hasDefABI { + fn.ABI = defABI + } + + if fn.Pragma&ir.CgoUnsafeArgs != 0 { + // CgoUnsafeArgs indicates the function (or its callee) uses + // offsets to dispatch arguments, which currently uses the ABI0 + // frame layout. Pin it to ABI0. + fn.ABI = obj.ABI0 + } + + // If cgo-exported, add the definition ABI to the cgo + // pragmas. + cgoExport := cgoExports[symName] + for _, pprag := range cgoExport { + // The export pragmas have the form: + // + // cgo_export_* <local> [<remote>] + // + // If <remote> is omitted, it's the same as + // <local>. + // + // Expand to + // + // cgo_export_* <local> <remote> <ABI> + if len(*pprag) == 2 { + *pprag = append(*pprag, (*pprag)[1]) + } + // Add the ABI argument. + *pprag = append(*pprag, fn.ABI.String()) + } + + // Apply references. + if abis, ok := s.refs[symName]; ok { + fn.ABIRefs |= abis + } + // Assume all functions are referenced at least as + // ABIInternal, since they may be referenced from + // other packages. + fn.ABIRefs.Set(obj.ABIInternal, true) + + // If a symbol is defined in this package (either in + // Go or assembly) and given a linkname, it may be + // referenced from another package, so make it + // callable via any ABI. It's important that we know + // it's defined in this package since other packages + // may "pull" symbols using linkname and we don't want + // to create duplicate ABI wrappers. + // + // However, if it's given a linkname for exporting to + // C, then we don't make ABI wrappers because the cgo + // tool wants the original definition. + hasBody := len(fn.Body) != 0 + if sym.Linkname != "" && (hasBody || hasDefABI) && len(cgoExport) == 0 { + fn.ABIRefs |= obj.ABISetCallable + } + + // Double check that cgo-exported symbols don't get + // any wrappers. + if len(cgoExport) > 0 && fn.ABIRefs&^obj.ABISetOf(fn.ABI) != 0 { + base.Fatalf("cgo exported function %s cannot have ABI wrappers", fn) + } + + if !buildcfg.Experiment.RegabiWrappers { + // We'll generate ABI aliases instead of + // wrappers once we have LSyms in InitLSym. + continue + } + + forEachWrapperABI(fn, makeABIWrapper) + } +} + // InitLSym defines f's obj.LSym and initializes it based on the // properties of f. This includes setting the symbol flags and ABI and // creating and initializing related DWARF symbols. @@ -138,96 +233,73 @@ func ReadSymABIs(file, myimportpath string) { // For body-less functions, we only create the LSym; for functions // with bodies call a helper to set up / populate the LSym. func InitLSym(f *ir.Func, hasBody bool) { - // FIXME: for new-style ABI wrappers, we set up the lsym at the - // point the wrapper is created. - if f.LSym != nil && base.Flag.ABIWrap { - return - } - staticdata.NeedFuncSym(f.Sym()) - selectLSym(f, hasBody) - if hasBody { - setupTextLSym(f, 0) - } -} - -// selectLSym sets up the LSym for a given function, and -// makes calls to helpers to create ABI wrappers if needed.
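// Aside (illustrative sketch, not part of this CL): refs now holds an
// obj.ABISet (a bit set) instead of a single obj.ABI, so a symbol referenced
// from assembly under several ABIs accumulates all of them:
//
//	var refs obj.ABISet
//	refs |= obj.ABISetOf(obj.ABI0)
//	refs |= obj.ABISetOf(obj.ABIInternal)
//	refs.Get(obj.ABI0)        // true
//	refs.Get(obj.ABIInternal) // true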
-func selectLSym(f *ir.Func, hasBody bool) { if f.LSym != nil { base.FatalfAt(f.Pos(), "InitLSym called twice on %v", f) } if nam := f.Nname; !ir.IsBlank(nam) { - - var wrapperABI obj.ABI - needABIWrapper := false - defABI, hasDefABI := symabiDefs[nam.Linksym().Name] - if hasDefABI && defABI == obj.ABI0 { - // Symbol is defined as ABI0. Create an - // Internal -> ABI0 wrapper. - f.LSym = nam.LinksymABI(obj.ABI0) - needABIWrapper, wrapperABI = true, obj.ABIInternal - } else { - f.LSym = nam.Linksym() - // No ABI override. Check that the symbol is - // using the expected ABI. - want := obj.ABIInternal - if f.LSym.ABI() != want { - base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want) - } - } + f.LSym = nam.LinksymABI(f.ABI) if f.Pragma&ir.Systemstack != 0 { f.LSym.Set(obj.AttrCFunc, true) } - - isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI) - if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported { - // Either 1) this symbol is definitely - // referenced as ABI0 from this package; or 2) - // this symbol is defined in this package but - // given a linkname, indicating that it may be - // referenced from another package. Create an - // ABI0 -> Internal wrapper so it can be - // called as ABI0. In case 2, it's important - // that we know it's defined in this package - // since other packages may "pull" symbols - // using linkname and we don't want to create - // duplicate ABI wrappers. - if f.LSym.ABI() != obj.ABI0 { - needABIWrapper, wrapperABI = true, obj.ABI0 - } + if f.ABI == obj.ABIInternal || !buildcfg.Experiment.RegabiWrappers { + // Function values can only point to + // ABIInternal entry points. This will create + // the funcsym for either the defining + // function or its wrapper as appropriate. + // + // If we're using ABI aliases instead of + // wrappers, we only InitLSym for the defining + // ABI of a function, so we make the funcsym + // when we see that. + staticdata.NeedFuncSym(f) } - - if needABIWrapper { - if !useABIWrapGen(f) { - // Fallback: use alias instead. FIXME. - - // These LSyms have the same name as the - // native function, so we create them directly - // rather than looking them up. The uniqueness - // of f.lsym ensures uniqueness of asym. - asym := &obj.LSym{ - Name: f.LSym.Name, - Type: objabi.SABIALIAS, - R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational" - } - asym.SetABI(wrapperABI) - asym.Set(obj.AttrDuplicateOK, true) - base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym) - } else { - if base.Debug.ABIWrap != 0 { - fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n", - wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name) - } - makeABIWrapper(f, wrapperABI) - } + if !buildcfg.Experiment.RegabiWrappers { + // Create ABI aliases instead of wrappers. + forEachWrapperABI(f, makeABIAlias) } } + if hasBody { + setupTextLSym(f, 0) + } } -// makeABIWrapper creates a new function that wraps a cross-ABI call -// to "f". The wrapper is marked as an ABIWRAPPER. +func forEachWrapperABI(fn *ir.Func, cb func(fn *ir.Func, wrapperABI obj.ABI)) { + need := fn.ABIRefs &^ obj.ABISetOf(fn.ABI) + if need == 0 { + return + } + + for wrapperABI := obj.ABI(0); wrapperABI < obj.ABICount; wrapperABI++ { + if !need.Get(wrapperABI) { + continue + } + cb(fn, wrapperABI) + } +} + +// makeABIAlias creates a new ABI alias so calls to f via wrapperABI +// will be resolved directly to f's ABI by the linker. 
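// Aside (worked example, not part of this CL): which ABIs forEachWrapperABI
// ends up visiting. For a function defined as ABIInternal but also referenced
// as ABI0:
//
//	need := fn.ABIRefs &^ obj.ABISetOf(fn.ABI) // strip the defining ABI
//	// need.Get(obj.ABI0) == true         -> emit an ABI0 alias or wrapper
//	// need.Get(obj.ABIInternal) == false -> the definition itself suffices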
+func makeABIAlias(f *ir.Func, wrapperABI obj.ABI) { + // These LSyms have the same name as the native function, so + // we create them directly rather than looking them up. + // The uniqueness of f.lsym ensures uniqueness of asym. + asym := &obj.LSym{ + Name: f.LSym.Name, + Type: objabi.SABIALIAS, + R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational" + } + asym.SetABI(wrapperABI) + asym.Set(obj.AttrDuplicateOK, true) + base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym) +} + +// makeABIWrapper creates a new function that will be called with +// wrapperABI and calls "f" using f.ABI. func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { + if base.Debug.ABIWrap != 0 { + fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %v\n", wrapperABI, f.ABI, f) + } // Q: is this needed? savepos := base.Pos @@ -239,7 +311,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // At the moment we don't support wrapping a method; we'd need machinery // below to handle the receiver. Panic if we see this scenario. - ft := f.Nname.Ntype.Type() + ft := f.Nname.Type() if ft.NumRecvs() != 0 { panic("makeABIWrapper support for wrapping methods not implemented") } @@ -253,16 +325,10 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // Reuse f's types.Sym to create a new ODCLFUNC/function. fn := typecheck.DeclFunc(f.Nname.Sym(), tfn) - fn.SetDupok(true) - fn.SetWrapper(true) // ignore frame for panic+recover matching + fn.ABI = wrapperABI - // Select LSYM now. - asym := base.Ctxt.LookupABI(f.LSym.Name, wrapperABI) - asym.Type = objabi.STEXT - if fn.LSym != nil { - panic("unexpected") - } - fn.LSym = asym + fn.SetABIWrapper(true) + fn.SetDupok(true) // ABI0-to-ABIInternal wrappers will be mainly loading params from // stack into registers (and/or storing stack locations back to @@ -279,7 +345,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // things in registers and pushing them onto the stack prior to // the ABI0 call, meaning that they will always need to allocate // stack space. If the compiler marks them as NOSPLIT this seems - // as though it could lead to situations where the the linker's + // as though it could lead to situations where the linker's // nosplit-overflow analysis would trigger a link failure. On the // other hand if they are not tagged NOSPLIT then this could cause // problems when building the runtime (since there may be calls to @@ -289,7 +355,7 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { // into trouble here. // FIXME: at the moment all.bash does not pass when I leave out // NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT. - setupTextLSym(fn, obj.NOSPLIT|obj.ABIWRAPPER) + fn.Pragma |= ir.Nosplit // Generate call. Use tail call if no params and no returns, // but a regular call otherwise. @@ -337,8 +403,6 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { ir.CurFunc = fn typecheck.Stmts(fn.Body) - escape.Batch([]*ir.Func{fn}, false) - typecheck.Target.Decls = append(typecheck.Target.Decls, fn) // Restore previous context. @@ -355,6 +419,9 @@ func setupTextLSym(f *ir.Func, flag int) { if f.Wrapper() { flag |= obj.WRAPPER } + if f.ABIWrapper() { + flag |= obj.ABIWRAPPER + } if f.Needctxt() { flag |= obj.NEEDCTXT } @@ -366,10 +433,20 @@ } // Clumsy but important. + // For functions that could be on the path of invoking a deferred + // function that can recover (runtime.reflectcall, reflect.callReflect, + // and reflect.callMethod), we want the panic+recover special handling.
// See test/recover.go for test cases and src/reflect/value.go // for the actual functions being considered. - if base.Ctxt.Pkgpath == "reflect" { - switch f.Sym().Name { + // + // runtime.reflectcall is an assembly function which tailcalls + // WRAPPER functions (runtime.callNN). Its ABI wrapper needs WRAPPER + // flag as well. + fnname := f.Sym().Name + if base.Ctxt.Pkgpath == "runtime" && fnname == "reflectcall" { + flag |= obj.WRAPPER + } else if base.Ctxt.Pkgpath == "reflect" { + switch fnname { case "callReflect", "callMethod": flag |= obj.WRAPPER } diff --git a/src/cmd/compile/internal/ssagen/arch.go b/src/cmd/compile/internal/ssagen/arch.go index cc50ab36b5d..7215f42c059 100644 --- a/src/cmd/compile/internal/ssagen/arch.go +++ b/src/cmd/compile/internal/ssagen/arch.go @@ -5,8 +5,10 @@ package ssagen import ( + "cmd/compile/internal/ir" "cmd/compile/internal/objw" "cmd/compile/internal/ssa" + "cmd/compile/internal/types" "cmd/internal/obj" ) @@ -39,4 +41,12 @@ type ArchInfo struct { // SSAGenBlock emits end-of-block Progs. SSAGenValue should be called // for all values in the block before SSAGenBlock. SSAGenBlock func(s *State, b, next *ssa.Block) + + // LoadRegResults emits instructions that loads register-assigned results + // into registers. They are already in memory (PPARAMOUT nodes). + // Used in open-coded defer return path. + LoadRegResults func(s *State, f *ssa.Func) + + // SpillArgReg emits instructions that spill reg to n+off. + SpillArgReg func(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog } diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go index a2434366a02..1fbc6a847d0 100644 --- a/src/cmd/compile/internal/ssagen/nowb.go +++ b/src/cmd/compile/internal/ssagen/nowb.go @@ -61,6 +61,12 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker { continue } c.curfn = n.(*ir.Func) + if c.curfn.ABIWrapper() { + // We only want "real" calls to these + // functions, not the generated ones within + // their own ABI wrappers. + continue + } ir.Visit(n, c.findExtraCalls) } c.curfn = nil diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go index 40f07a8d45a..62567535d76 100644 --- a/src/cmd/compile/internal/ssagen/pgen.go +++ b/src/cmd/compile/internal/ssagen/pgen.go @@ -5,6 +5,7 @@ package ssagen import ( + "internal/buildcfg" "internal/race" "math/rand" "sort" @@ -15,12 +16,10 @@ import ( "cmd/compile/internal/ir" "cmd/compile/internal/objw" "cmd/compile/internal/ssa" - "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" - "cmd/internal/sys" ) // cmpstackvarlt reports whether the stack variable a sorts before b. @@ -34,11 +33,11 @@ import ( // the top of the stack and increasing in size. // Non-autos sort on offset. func cmpstackvarlt(a, b *ir.Name) bool { - if (a.Class == ir.PAUTO) != (b.Class == ir.PAUTO) { - return b.Class == ir.PAUTO + if needAlloc(a) != needAlloc(b) { + return needAlloc(b) } - if a.Class != ir.PAUTO { + if !needAlloc(a) { return a.FrameOffset() < b.FrameOffset() } @@ -72,6 +71,13 @@ func (s byStackVar) Len() int { return len(s) } func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) } func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// needAlloc reports whether n is within the current frame, for which we need to +// allocate space. 
In particular, it excludes arguments and results, which are in +// the callers frame. +func needAlloc(n *ir.Name) bool { + return n.Class == ir.PAUTO || n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters() +} + func (s *ssafn) AllocFrame(f *ssa.Func) { s.stksize = 0 s.stkptrsize = 0 @@ -79,7 +85,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { // Mark the PAUTO's unused. for _, ln := range fn.Dcl { - if ln.Class == ir.PAUTO { + if needAlloc(ln) { ln.SetUsed(false) } } @@ -90,37 +96,31 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { } } - scratchUsed := false for _, b := range f.Blocks { for _, v := range b.Values { if n, ok := v.Aux.(*ir.Name); ok { switch n.Class { - case ir.PPARAM, ir.PPARAMOUT: - // Don't modify RegFP; it is a global. - if n != ir.RegFP { - n.SetUsed(true) + case ir.PPARAMOUT: + if n.IsOutputParamInRegisters() && v.Op == ssa.OpVarDef { + // ignore VarDef, look for "real" uses. + // TODO: maybe do this for PAUTO as well? + continue } - case ir.PAUTO: + fallthrough + case ir.PPARAM, ir.PAUTO: n.SetUsed(true) } } - if !scratchUsed { - scratchUsed = v.Op.UsesScratch() - } - } } - if f.Config.NeedsFpScratch && scratchUsed { - s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64]) - } - sort.Sort(byStackVar(fn.Dcl)) // Reassign stack offsets of the locals that are used. lastHasPtr := false for i, n := range fn.Dcl { - if n.Op() != ir.ONAME || n.Class != ir.PAUTO { + if n.Op() != ir.ONAME || n.Class != ir.PAUTO && !(n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters()) { + // i.e., stack assign if AUTO, or if PARAMOUT in registers (which has no predefined spill locations) continue } if !n.Used() { @@ -148,9 +148,6 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { } else { lastHasPtr = false } - if Arch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { - s.stksize = types.Rnd(s.stksize, int64(types.PtrSize)) - } n.SetFrameOffset(-s.stksize) } @@ -167,9 +164,9 @@ const maxStackSize = 1 << 30 func Compile(fn *ir.Func, worker int) { f := buildssa(fn, worker) // Note: check arg size to fix issue 25507. 
- if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize { + if f.Frontend().(*ssafn).stksize >= maxStackSize || f.OwnAux.ArgWidth() >= maxStackSize { largeStackFramesMu.Lock() - largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()}) + largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: f.OwnAux.ArgWidth(), pos: fn.Pos()}) largeStackFramesMu.Unlock() return } @@ -185,7 +182,7 @@ func Compile(fn *ir.Func, worker int) { if pp.Text.To.Offset >= maxStackSize { largeStackFramesMu.Lock() locals := f.Frontend().(*ssafn).stksize - largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()}) + largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: f.OwnAux.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()}) largeStackFramesMu.Unlock() return } @@ -208,16 +205,20 @@ func StackOffset(slot ssa.LocalSlot) int32 { n := slot.N var off int64 switch n.Class { + case ir.PPARAM, ir.PPARAMOUT: + if !n.IsOutputParamInRegisters() { + off = n.FrameOffset() + base.Ctxt.FixedFrameSize() + break + } + fallthrough // PPARAMOUT in registers allocates like an AUTO case ir.PAUTO: off = n.FrameOffset() if base.Ctxt.FixedFrameSize() == 0 { off -= int64(types.PtrSize) } - if objabi.Framepointer_enabled { + if buildcfg.FramePointerEnabled { off -= int64(types.PtrSize) } - case ir.PPARAM, ir.PPARAMOUT: - off = n.FrameOffset() + base.Ctxt.FixedFrameSize() } return int32(off + slot.Off) } @@ -228,7 +229,7 @@ func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) { if fnsym == nil { return } - if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 { + if !buildcfg.Experiment.FieldTrack || len(tracked) == 0 { return } diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 6b1ddebd32a..004e084f728 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -7,10 +7,11 @@ package ssagen import ( "bufio" "bytes" - "encoding/binary" + "cmd/compile/internal/abi" "fmt" "go/constant" "html" + "internal/buildcfg" "os" "path/filepath" "sort" @@ -162,6 +163,7 @@ func InitConfig() { BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU") BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C") BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU") + BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert") } else { BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex") BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU") @@ -179,6 +181,7 @@ func InitConfig() { BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU") BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C") BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU") + BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert") } if Arch.LinkArch.PtrSize == 4 { ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex") @@ -208,6 +211,74 @@ func InitConfig() { ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic") } +// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map. 
+// This is not necessarily the ABI used to call it. +// Currently (1.17 dev) such a stack map is always ABI0; +// any ABI wrapper that is present is nosplit, hence a precise +// stack map is not needed there (the parameters survive only long +// enough to call the wrapped assembly function). +// This always returns a freshly copied ABI. +func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig { + return ssaConfig.ABI0.Copy() // No idea what races will result, be safe +} + +// These are disabled but remain ready for use in case they are needed for the next regabi port. +// TODO if they are not needed for 1.18 / next register abi port, delete them. +const magicNameDotSuffix = ".*disabled*MagicMethodNameForTestingRegisterABI" +const magicLastTypeName = "*disabled*MagicLastTypeNameForTestingRegisterABI" + +// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI. +// Passing a nil function returns the default ABI based on experiment configuration. +func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig { + if buildcfg.Experiment.RegabiArgs { + // Select the ABI based on the function's defining ABI. + if fn == nil { + return abi1 + } + switch fn.ABI { + case obj.ABI0: + return abi0 + case obj.ABIInternal: + // TODO(austin): Clean up the nomenclature here. + // It's not clear that "abi1" is ABIInternal. + return abi1 + } + base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI) + panic("not reachable") + } + + a := abi0 + if fn != nil { + name := ir.FuncName(fn) + magicName := strings.HasSuffix(name, magicNameDotSuffix) + if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working + if strings.Contains(name, ".") { + if !magicName { + base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name) + } + } + a = abi1 + } else if magicName { + if base.FmtPos(fn.Pos()) == "<autogenerated>:1" { + // no way to put a pragma here, and it will error out in the real source code if they did not do it there. + a = abi1 + } else { + base.ErrorfAt(fn.Pos(), "Methods with magic name %s (method %s) must also specify //go:registerparams", magicNameDotSuffix[1:], name) + } + } + if regAbiForFuncType(fn.Type().FuncType()) { + // fmt.Printf("Saw magic last type name for function %s\n", name) + a = abi1 + } + } + return a +} + +func regAbiForFuncType(ft *types.Func) bool { + np := ft.Params.NumFields() + return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName) +} + // getParam returns the Field of the ith param of node n (which is a // function/method/interface call), where the receiver of a method call is // considered as the 0th parameter. This does not include the receiver of an @@ -274,7 +345,7 @@ func (s *state) emitOpenDeferInfo() { var maxargsize int64 for i := len(s.openDefers) - 1; i >= 0; i-- { r := s.openDefers[i] - argsize := r.n.X.Type().ArgWidth() + argsize := r.n.X.Type().ArgWidth() // TODO register args: but maybe use of abi0 will make this easy if argsize > maxargsize { maxargsize = argsize } @@ -296,27 +367,42 @@ numArgs++ } off = dvarint(x, off, int64(numArgs)) + argAdjust := 0 // presence of receiver offsets the parameter count.
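// Aside (worked example, not part of this CL): the ABI selection performed by
// abiForFunc above when GOEXPERIMENT=regabiargs is on:
//
//	abiForFunc(nil, abi0, abi1)   // -> abi1 (the default, e.g. for indirect calls)
//	fn.ABI == obj.ABI0            // -> abi0
//	fn.ABI == obj.ABIInternal     // -> abi1
//
// With the experiment off, abi0 is chosen unless the function opts in via
// //go:registerparams or one of the (currently disabled) magic test names.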
if r.rcvrNode != nil { - off = dvarint(x, off, -r.rcvrNode.FrameOffset()) + off = dvarint(x, off, -okOffset(r.rcvrNode.FrameOffset())) off = dvarint(x, off, s.config.PtrSize) - off = dvarint(x, off, 0) + off = dvarint(x, off, 0) // This is okay because defer records use ABI0 (for now) + argAdjust++ } + + // TODO(register args) assume abi0 for this? + ab := s.f.ABI0 + pri := ab.ABIAnalyzeFuncType(r.n.X.Type().FuncType()) for j, arg := range r.argNodes { f := getParam(r.n, j) - off = dvarint(x, off, -arg.FrameOffset()) + off = dvarint(x, off, -okOffset(arg.FrameOffset())) off = dvarint(x, off, f.Type.Size()) - off = dvarint(x, off, f.Offset) + off = dvarint(x, off, okOffset(pri.InParam(j+argAdjust).FrameOffset(pri))) } } } +func okOffset(offset int64) int64 { + if offset == types.BOGUS_FUNARG_OFFSET { + panic(fmt.Errorf("Bogus offset %d", offset)) + } + return offset +} + // buildssa builds an SSA function for fn. // worker indicates which of the backend workers is doing the processing. func buildssa(fn *ir.Func, worker int) *ssa.Func { name := ir.FuncName(fn) printssa := false - if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset" - printssa = name == ssaDump || base.Ctxt.Pkgpath+"."+name == ssaDump + if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset" + pkgDotName := base.Ctxt.Pkgpath + "." + name + printssa = name == ssaDump || + strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump)) } var astBuf *bytes.Buffer if printssa { @@ -357,12 +443,10 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { if fn.Pragma&ir.Nosplit != 0 { s.f.NoSplit = true } - if fn.Pragma&ir.RegisterParams != 0 { // TODO remove after register abi is working - if strings.Contains(name, ".") { - base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name) - } - s.f.Warnl(fn.Pos(), "declared function %v has register params", fn) - } + s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache. + s.f.ABI1 = ssaConfig.ABI1.Copy() + s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1) + s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1) s.panics = map[funcLine]*ssa.Block{} s.softFloat = s.config.SoftFloat @@ -391,11 +475,12 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed() switch { + case base.Debug.NoOpenDefer != 0: + s.hasOpenDefers = false case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386": // Don't support open-coded defers for 386 ONLY when using shared // libraries, because there is extra code (added by rewriteToUseGot()) - // preceding the deferreturn/ret code that is generated by gencallret() - // that we don't track correctly. + // preceding the deferreturn/ret code that we don't track correctly. 
s.hasOpenDefers = false } if s.hasOpenDefers && len(s.curfn.Exit) > 0 { @@ -449,18 +534,18 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false) } + var params *abi.ABIParamResultInfo + params = s.f.ABISelf.ABIAnalyze(fn.Type(), true) + // Generate addresses of local declarations s.decladdrs = map[*ir.Name]*ssa.Value{} - var args []ssa.Param - var results []ssa.Param for _, n := range fn.Dcl { switch n.Class { case ir.PPARAM: + // Be aware that blank and unnamed input parameters will not appear here, but do appear in the type s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) - args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())}) case ir.PPARAMOUT: s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem) - results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset()), Name: n}) case ir.PAUTO: // processed at each use, to prevent Addr coming // before the decl. @@ -468,14 +553,29 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { s.Fatalf("local variable with class %v unimplemented", n.Class) } } - s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, args, results) + + s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params) // Populate SSAable arguments. for _, n := range fn.Dcl { - if n.Class == ir.PPARAM && s.canSSA(n) { - v := s.newValue0A(ssa.OpArg, n.Type(), n) - s.vars[n] = v - s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself. + if n.Class == ir.PPARAM { + if s.canSSA(n) { + v := s.newValue0A(ssa.OpArg, n.Type(), n) + s.vars[n] = v + s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself. + } else { // address was taken AND/OR too large for SSA + paramAssignment := ssa.ParamAssignmentForArgName(s.f, n) + if len(paramAssignment.Registers) > 0 { + if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory. + v := s.newValue0A(ssa.OpArg, n.Type(), n) + s.store(n.Type(), s.decladdrs[n], v) + } else { // Too big for SSA. + // Brute force, and early, do a bunch of stores from registers + // TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg. + s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false) + } + } + } } } @@ -545,9 +645,42 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { s.emitOpenDeferInfo() } + // Record incoming parameter spill information for morestack calls emitted in the assembler. + // This is done here, using all the parameters (used, partially used, and unused) because + // it mimics the behavior of the former ABI (everything stored) and because it's not 100% + // clear if naming conventions are respected in autogenerated code. + // TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also. + // TODO non-amd64 architectures have link registers etc that may require adjustment here. 
+ for _, p := range params.InParams() { + typs, offs := p.RegisterTypesAndOffsets() + for i, t := range typs { + o := offs[i] // offset within parameter + fo := p.FrameOffset(params) // offset of parameter in frame + reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config) + s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t}) + } + } + return s.f } +func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) { + typs, offs := paramAssignment.RegisterTypesAndOffsets() + for i, t := range typs { + if pointersOnly && !t.IsPtrShaped() { + continue + } + r := paramAssignment.Registers[i] + o := offs[i] + op, reg := ssa.ArgOpAndRegisterFor(r, abi) + aux := &ssa.AuxNameOffset{Name: n, Offset: o} + v := s.newValue0I(op, t, reg) + v.Aux = aux + p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr) + s.store(t, p, v) + } +} + // zeroResults zeros the return values at the start of the function. // We need to do this very early in the function. Defer might stop a // panic and show the return values as they exist at the time of @@ -1028,39 +1161,51 @@ func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3) } +func (s *state) entryBlock() *ssa.Block { + b := s.f.Entry + if base.Flag.N > 0 && s.curBlock != nil { + // If optimizations are off, allocate in current block instead. Since with -N + // we're not doing the CSE or tighten passes, putting lots of stuff in the + // entry block leads to O(n^2) entries in the live value map during regalloc. + // See issue 45897. + b = s.curBlock + } + return b +} + // entryNewValue0 adds a new value with no arguments to the entry block. func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value { - return s.f.Entry.NewValue0(src.NoXPos, op, t) + return s.entryBlock().NewValue0(src.NoXPos, op, t) } // entryNewValue0A adds a new value with no arguments and an aux value to the entry block. func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value { - return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux) + return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux) } // entryNewValue1 adds a new value with one argument to the entry block. func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue1(src.NoXPos, op, t, arg) + return s.entryBlock().NewValue1(src.NoXPos, op, t, arg) } // entryNewValue1I adds a new value with one argument and an auxint value to the entry block. func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg) + return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg) } // entryNewValue1A adds a new value with one argument and an aux value to the entry block. func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg) + return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg) } // entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1) + return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1) } // entryNewValue2A adds a new value with two arguments and an aux value to the entry block. func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1) + return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1) } // const* routines add a new const value to the entry block. @@ -1151,7 +1296,7 @@ func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrument if f.Sym.IsBlank() { continue } - offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr) + offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), abi.FieldOffsetOf(f), addr) s.instrumentFields(f.Type, offptr, kind) } } @@ -1589,7 +1734,7 @@ func (s *state) stmt(n ir.Node) { n := n.(*ir.TailCallStmt) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet - b.Aux = callTargetLSym(n.Target, s.curfn.LSym) + b.Aux = callTargetLSym(n.Target) case ir.OCONTINUE, ir.OBREAK: n := n.(*ir.BranchStmt) @@ -1691,7 +1836,7 @@ func (s *state) stmt(n ir.Node) { b.AddEdgeTo(bCond) // It can happen that bIncr ends in a block containing only VARKILL, // and that muddles the debugging experience. - if n.Op() != ir.OFORUNTIL && b.Pos == src.NoXPos { + if b.Pos == src.NoXPos { b.Pos = bCond.Pos } } @@ -1803,7 +1948,6 @@ const shareDeferExits = false // It returns a BlockRet block that ends the control flow. Its control value // will be set to the final memory state. func (s *state) exit() *ssa.Block { - lateResultLowering := s.f.DebugTest if s.hasdefer { if s.hasOpenDefers { if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount { @@ -1824,56 +1968,40 @@ func (s *state) exit() *ssa.Block { var m *ssa.Value // Do actual return. // These currently turn into self-copies (in many cases). - if lateResultLowering { - resultFields := s.curfn.Type().Results().FieldSlice() - results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1) - m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType()) - // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations. - for i, f := range resultFields { - n := f.Nname.(*ir.Name) - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) - if s.canSSA(n) { // result is in some SSA variable - results[i] = s.variable(n, n.Type()) - } else if !n.OnStack() { // result is actually heap allocated - ha := s.expr(n.Heapaddr) - s.instrumentFields(n.Type(), ha, instrumentRead) - results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem()) - } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA. - // Before register ABI this ought to be a self-move, home=dest, - // With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed) - results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem()) + resultFields := s.curfn.Type().Results().FieldSlice() + results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1) + m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType()) + // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations. 
+ for i, f := range resultFields { + n := f.Nname.(*ir.Name) + if s.canSSA(n) { // result is in some SSA variable + if !n.IsOutputParamInRegisters() { + // We are about to store to the result slot. + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) } + results[i] = s.variable(n, n.Type()) + } else if !n.OnStack() { // result is actually heap allocated + // We are about to copy the in-heap result to the result slot. + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + ha := s.expr(n.Heapaddr) + s.instrumentFields(n.Type(), ha, instrumentRead) + results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem()) + } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA. + // Before register ABI this ought to be a self-move, home=dest, + // With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed) + // No VarDef, as the result slot is already holding live value. + results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem()) } - - // Run exit code. Today, this is just racefuncexit, in -race mode. - // TODO this seems risky here with a register-ABI, but not clear it is right to do it earlier either. - // Spills in register allocation might just fix it. - s.stmtList(s.curfn.Exit) - - results[len(results)-1] = s.mem() - m.AddArgs(results...) - } else { - // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations. - for _, f := range s.curfn.Type().Results().FieldSlice() { - n := f.Nname.(*ir.Name) - if s.canSSA(n) { - val := s.variable(n, n.Type()) - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) - s.store(n.Type(), s.decladdrs[n], val) - } else if !n.OnStack() { - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) - s.move(n.Type(), s.decladdrs[n], s.expr(n.Heapaddr)) - } // else, on stack but too large to SSA, the result is already in its destination by construction, so no store needed. - - // TODO: if (SSA) val is ever spilled, we'd like to use the PPARAMOUT slot for spilling it. That won't happen currently. - } - - // Run exit code. Today, this is just racefuncexit, in -race mode. - s.stmtList(s.curfn.Exit) - - // Do actual return. - m = s.mem() } + + // Run exit code. Today, this is just racefuncexit, in -race mode. + // TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either. + // Spills in register allocation might just fix it. + s.stmtList(s.curfn.Exit) + + results[len(results)-1] = s.mem() + m.AddArgs(results...) + b = s.endBlock() b.Kind = ssa.BlockRet b.SetControl(m) @@ -2285,6 +2413,11 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.OCFUNC: n := n.(*ir.UnaryExpr) aux := n.X.(*ir.Name).Linksym() + // OCFUNC is used to build function values, which must + // always reference ABIInternal entry points. 
+ if aux.ABI() != obj.ABIInternal { + s.Fatalf("expected ABIInternal: %v", aux.ABI()) + } return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) case ir.ONAME: n := n.(*ir.Name) @@ -2862,22 +2995,13 @@ func (s *state) expr(n ir.Node) *ssa.Value { case ir.ORESULT: n := n.(*ir.ResultExpr) if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { - // Do the old thing - addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset) - return s.rawLoad(n.Type(), addr) + panic("Expected to see a previous call") } - which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset) + which := n.Index if which == -1 { - // Do the old thing // TODO: Panic instead. - addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset) - return s.rawLoad(n.Type(), addr) - } - if TypeOK(n.Type()) { - return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall) - } else { - addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall) - return s.rawLoad(n.Type(), addr) + panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall)) } + return s.resultOfCall(s.prevCall, which, n.Type()) case ir.ODEREF: n := n.(*ir.StarExpr) @@ -3040,6 +3164,18 @@ func (s *state) expr(n ir.Node) *ssa.Value { p, l, _ := s.slice(v, i, j, nil, n.Bounded()) return s.newValue2(ssa.OpStringMake, n.Type(), p, l) + case ir.OSLICE2ARRPTR: + // if arrlen > slice.len { + // panic(...) + // } + // slice.ptr + n := n.(*ir.ConvExpr) + v := s.expr(n.X) + arrlen := s.constInt(types.Types[types.TINT], n.Type().Elem().NumElem()) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v) + s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) + return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), v) + case ir.OCALLFUNC: n := n.(*ir.CallExpr) if ir.IsIntrinsicCall(n) { @@ -3072,12 +3208,42 @@ func (s *state) expr(n ir.Node) *ssa.Value { n := n.(*ir.UnaryExpr) return s.newObject(n.Type().Elem()) + case ir.OUNSAFEADD: + n := n.(*ir.BinaryExpr) + ptr := s.expr(n.X) + len := s.expr(n.Y) + return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len) + default: s.Fatalf("unhandled expr %v", n.Op()) return nil } } +func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value { + aux := c.Aux.(*ssa.AuxCall) + pa := aux.ParamAssignmentForResult(which) + // TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded. + // SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future. + if len(pa.Registers) == 0 && !TypeOK(t) { + addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c) + return s.rawLoad(t, addr) + } + return s.newValue1I(ssa.OpSelectN, t, which, c) +} + +func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value { + aux := c.Aux.(*ssa.AuxCall) + pa := aux.ParamAssignmentForResult(which) + if len(pa.Registers) == 0 { + return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c) + } + _, addr := s.temp(c.Pos, t) + rval := s.newValue1I(ssa.OpSelectN, t, which, c) + s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false) + return addr +} + // append converts an OAPPEND node to SSA. // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, // adds it to s, and returns the Value.
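The OSLICE2ARRPTR and OUNSAFEADD cases added above implement two Go 1.17 language features. A runnable user-level illustration of the semantics the SSA lowering must provide (ordinary Go, not compiler code):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := []int{1, 2, 3, 4}

	// Slice-to-array-pointer conversion: the compiler emits the
	// "arrlen > slice.len" bounds check sketched above, then reuses the
	// slice's data pointer (OpSlicePtrUnchecked).
	p := (*[4]int)(s)
	fmt.Println(p[3]) // 4

	// Converting to a longer array would trip the emitted bounds check:
	// _ = (*[5]int)(s) // would panic at run time

	// unsafe.Add lowers to a single pointer add (OpAddPtr).
	q := (*int)(unsafe.Add(unsafe.Pointer(&s[0]), unsafe.Sizeof(s[0])))
	fmt.Println(*q) // 2
}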
@@ -3898,25 +4064,25 @@ func InitTables() { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) return nil }, - sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) + sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "And", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) return nil }, - sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) + sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Or8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) return nil }, - sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) + sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X) addF("runtime/internal/atomic", "Or", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) return nil }, - sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X) + sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X) atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) { s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) @@ -3935,18 +4101,23 @@ func InitTables() { makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) + // Aliases for atomic load operations + alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...) alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) - alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) - alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...) - alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) + alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...) + alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...) alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...) alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed + + // Aliases for atomic store operations + alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...) + alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...) alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...) 
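The runtime/internal/atomic names aliased here are not importable from user code, but the exported sync/atomic package is intrinsified through the same table and exposes the same operation families. A small runnable illustration of Load/Store/Swap/Add/CompareAndSwap at the source level:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var x int64
	atomic.AddInt64(&x, 10)                      // Xadd-style read-modify-write
	old := atomic.SwapInt64(&x, 20)              // Xchg-style swap, returns 10
	ok := atomic.CompareAndSwapInt64(&x, 20, 30) // Cas-style, succeeds
	fmt.Println(old, ok, atomic.LoadInt64(&x))   // 10 true 30
}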
@@ -3955,10 +4126,22 @@ func InitTables() { alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed + + // Aliases for atomic swap operations + alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...) + alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...) alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) + + // Aliases for atomic add operations + alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...) + alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...) + + // Aliases for atomic CAS operations + alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...) + alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...) alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...) alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) @@ -4352,13 +4535,14 @@ func InitTables() { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64) - alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) + alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) + alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) addF("math/bits", "Add64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) }, sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X) - alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X) + alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X) addF("math/bits", "Sub64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) @@ -4442,6 +4626,9 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { if sym.Pkg == types.LocalPkg { pkg = base.Ctxt.Pkgpath } + if sym.Pkg == ir.Pkgs.Runtime { + pkg = "runtime" + } if base.Flag.Race && pkg == "sync/atomic" { // The race detector needs to be able to intercept these calls. // We can't intrinsify them. 
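For reference, the math/bits intrinsics being extended here (now covering PPC64LE, and reused by runtime-internal code through the Mul64 alias) behave like this at the source level:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// 128-bit product: hi:lo = a*b, a single wide-multiply instruction
	// on the intrinsified architectures.
	hi, lo := bits.Mul64(1<<63, 4)
	fmt.Println(hi, lo) // 2 0

	// Add with explicit carry in/out, used to chain multiword sums.
	sum, carry := bits.Add64(1<<63, 1<<63, 1)
	fmt.Println(sum, carry) // 1 1
}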
@@ -4510,6 +4697,10 @@ func (s *state) openDeferRecord(n *ir.CallExpr) { var args []*ssa.Value var argNodes []*ir.Name + if buildcfg.Experiment.RegabiDefer && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) { + s.Fatalf("defer call with arguments or results: %v", n) + } + opendefer := &openDeferInfo{ n: n, } @@ -4587,9 +4778,9 @@ func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Val // declared in the entry block, so that it will be live for the // defer exit code (which will actually access it only if the // associated defer call has been activated). - s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar]) - s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar]) - addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar]) + s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar]) + s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar]) + addrArgTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar]) } else { // Special case if we're still in the entry block. We can't use // the above code, since s.defvars[s.f.Entry.ID] isn't defined @@ -4660,21 +4851,20 @@ func (s *state) openDeferExit() { // Generate code to call the function call of the defer, using the // closure/receiver/args that were stored in argtmps at the point // of the defer statement. - argStart := base.Ctxt.FixedFrameSize() fn := r.n.X stksize := fn.Type().ArgWidth() - var ACArgs []ssa.Param - var ACResults []ssa.Param + var ACArgs []*types.Type + var ACResults []*types.Type var callArgs []*ssa.Value if r.rcvr != nil { // rcvr in case of OCALLINTER v := s.load(r.rcvr.Type.Elem(), r.rcvr) - ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)}) + ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) callArgs = append(callArgs, v) } for j, argAddrVal := range r.argVals { f := getParam(r.n, j) - ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)}) + ACArgs = append(ACArgs, f.Type) var a *ssa.Value if !TypeOK(f.Type) { a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem()) @@ -4688,10 +4878,10 @@ func (s *state) openDeferExit() { v := s.load(r.closure.Type.Elem(), r.closure) s.maybeNilCheckClosure(v, callDefer) codeptr := s.rawLoad(types.Types[types.TUINTPTR], v) - aux := ssa.ClosureAuxCall(ACArgs, ACResults) + aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v) } else { - aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), ACArgs, ACResults) + aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) } callArgs = append(callArgs, s.mem()) @@ -4738,31 +4928,55 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val var codeptr *ssa.Value // ptr to target code (if dynamic) var rcvr *ssa.Value // receiver to set fn := n.X - var ACArgs []ssa.Param - var ACResults []ssa.Param - var callArgs 
[]*ssa.Value - res := n.X.Type().Results() - if k == callNormal { - nf := res.NumFields() - for i := 0; i < nf; i++ { - fp := res.Field(i) - ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + base.Ctxt.FixedFrameSize())}) + var ACArgs []*types.Type // AuxCall args + var ACResults []*types.Type // AuxCall results + var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead). + + callABI := s.f.ABIDefault + + if !buildcfg.Experiment.RegabiArgs { + var magicFnNameSym *types.Sym + if fn.Name() != nil { + magicFnNameSym = fn.Name().Sym() + ss := magicFnNameSym.Name + if strings.HasSuffix(ss, magicNameDotSuffix) { + callABI = s.f.ABI1 + } + } + if magicFnNameSym == nil && n.Op() == ir.OCALLINTER { + magicFnNameSym = fn.(*ir.SelectorExpr).Sym() + ss := magicFnNameSym.Name + if strings.HasSuffix(ss, magicNameDotSuffix[1:]) { + callABI = s.f.ABI1 + } } } - inRegisters := false + if buildcfg.Experiment.RegabiDefer && k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) { + s.Fatalf("go/defer call with arguments: %v", n) + } switch n.Op() { case ir.OCALLFUNC: if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC { fn := fn.(*ir.Name) callee = fn - // TODO remove after register abi is working - inRegistersImported := fn.Pragma()&ir.RegisterParams != 0 - inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0 - inRegisters = inRegistersImported || inRegistersSamePackage - if inRegisters { - s.f.Warnl(n.Pos(), "called function %v has register params", callee) + if buildcfg.Experiment.RegabiArgs { + // This is a static call, so it may be + // a direct call to a non-ABIInternal + // function. fn.Func may be nil for + // some compiler-generated functions, + // but those are all ABIInternal. + if fn.Func != nil { + callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1) + } + } else { + // TODO(register args) remove after register abi is working + inRegistersImported := fn.Pragma()&ir.RegisterParams != 0 + inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0 + if inRegistersImported || inRegistersSamePackage { + callABI = s.f.ABI1 + } } break } @@ -4787,8 +5001,24 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val closure = iclosure } } + + if !buildcfg.Experiment.RegabiArgs { + if regAbiForFuncType(n.X.Type().FuncType()) { + // Magic last type in input args to call + callABI = s.f.ABI1 + } + } + + params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */) types.CalcSize(fn.Type()) - stksize := fn.Type().ArgWidth() // includes receiver, args, and results + stksize := params.ArgWidth() // includes receiver, args, and results + + res := n.X.Type().Results() + if k == callNormal { + for _, p := range params.OutParams() { + ACResults = append(ACResults, p.Type) + } + } var call *ssa.Value if k == callDeferStack { @@ -4821,7 +5051,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Then, store all the arguments of the defer call. ft := fn.Type() - off := t.FieldOff(12) + off := t.FieldOff(12) // TODO register args: be sure this isn't a hardcoded param stack offset. args := n.Args // Set receiver (for interface calls). Always a pointer. @@ -4835,20 +5065,20 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val } // Set other args. 
for _, f := range ft.Params().Fields().Slice() { - s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset) + s.storeArgWithBase(args[0], f.Type, addr, off+abi.FieldOffsetOf(f)) args = args[1:] } // Call runtime.deferprocStack with pointer to _defer record. - ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())}) - aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, ACArgs, ACResults) + ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) + aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) callArgs = append(callArgs, addr, s.mem()) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call.AddArgs(callArgs...) if stksize < int64(types.PtrSize) { // We need room for both the call to deferprocStack and the call to // the deferred function. - // TODO Revisit this if/when we pass args in registers. + // TODO(register args) Revisit this if/when we pass args in registers. stksize = int64(types.PtrSize) } call.AuxInt = stksize @@ -4860,9 +5090,9 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val if k != callNormal { // Write argsize and closure (args to newproc/deferproc). argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize)) - ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINT32], Offset: int32(argStart)}) + ACArgs = append(ACArgs, types.Types[types.TUINT32]) // not argExtra callArgs = append(callArgs, argsize) - ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(types.PtrSize)}) + ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) callArgs = append(callArgs, closure) stksize += 2 * int64(types.PtrSize) argStart += 2 * int64(types.PtrSize) @@ -4870,7 +5100,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // Set receiver (for interface calls). 
if rcvr != nil { - ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)}) callArgs = append(callArgs, rcvr) } @@ -4880,11 +5109,12 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val if n.Op() == ir.OCALLMETH { base.Fatalf("OCALLMETH missed by walkCall") } + + for _, p := range params.InParams() { // includes receiver for interface calls + ACArgs = append(ACArgs, p.Type) + } for i, n := range args { - f := t.Params().Field(i) - ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset) - ACArgs = append(ACArgs, ACArg) - callArgs = append(callArgs, arg) + callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type)) } callArgs = append(callArgs, s.mem()) @@ -4892,11 +5122,11 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // call target switch { case k == callDefer: - aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults) + aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) case k == callGo: - aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults) - call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc case closure != nil: // rawLoad because loading the code pointer from a // closure is always safe, but IsSanitizerSafeAddr @@ -4904,13 +5134,14 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val // critical that we not clobber any arguments already // stored onto the stack. codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure) - aux := ssa.ClosureAuxCall(ACArgs, ACResults) + aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults)) call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure) case codeptr != nil: - aux := ssa.InterfaceAuxCall(ACArgs, ACResults) + // Note that the "receiver" parameter is nil because the actual receiver is the first input parameter. + aux := ssa.InterfaceAuxCall(params) call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr) case callee != nil: - aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults) + aux := ssa.StaticAuxCall(callTargetLSym(callee), params) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) default: s.Fatalf("bad call type %v %v", n.Op(), n) @@ -4947,17 +5178,15 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val } fp := res.Field(0) if returnResultAddr { - pt := types.NewPtr(fp.Type) - return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call) + return s.resultAddrOfCall(call, 0, fp.Type) } - return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call) } // maybeNilCheckClosure checks if a nil check of a closure is needed in some // architecture-dependent situations and, if so, emits the nil check. func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) { - if Arch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo { + if Arch.LinkArch.Family == sys.Wasm || buildcfg.GOOS == "aix" && k != callGo { // On AIX, the closure needs to be verified as fn can be nil, except if it's a call go. 
This needs to be handled by the runtime to have the "go of nil func value" error. // TODO(neelance): On other architectures this should be eliminated by the optimization steps s.nilCheck(closure) @@ -5028,10 +5257,6 @@ func (s *state) addr(n ir.Node) *ssa.Value { if v != nil { return v } - if n == ir.RegFP { - // Special arg that points to the frame pointer (Used by ORECOVER). - return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem) - } s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) return nil case ir.PAUTO: @@ -5048,17 +5273,7 @@ func (s *state) addr(n ir.Node) *ssa.Value { case ir.ORESULT: // load return from callee n := n.(*ir.ResultExpr) - if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { - return s.constOffPtrSP(t, n.Offset) - } - which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset) - if which == -1 { - // Do the old thing // TODO: Panic instead. - return s.constOffPtrSP(t, n.Offset) - } - x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall) - return x - + return s.resultAddrOfCall(s.prevCall, n.Index, n.Type()) case ir.OINDEX: n := n.(*ir.IndexExpr) if n.X.Type().IsSlice() { @@ -5367,16 +5582,15 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . s.prevCall = nil // Write args to the stack off := base.Ctxt.FixedFrameSize() - var ACArgs []ssa.Param - var ACResults []ssa.Param var callArgs []*ssa.Value + var callArgTypes []*types.Type for _, arg := range args { t := arg.Type off = types.Rnd(off, t.Alignment()) size := t.Size() - ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)}) callArgs = append(callArgs, arg) + callArgTypes = append(callArgTypes, t) off += size } off = types.Rnd(off, int64(types.RegSize)) @@ -5385,17 +5599,16 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . offR := off for _, t := range results { offR = types.Rnd(offR, t.Alignment()) - ACResults = append(ACResults, ssa.Param{Type: t, Offset: int32(offR)}) offR += t.Size() } // Issue call var call *ssa.Value - aux := ssa.StaticAuxCall(fn, ACArgs, ACResults) + aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results)) callArgs = append(callArgs, s.mem()) call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call.AddArgs(callArgs...) - s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call) + s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call) if !returns { // Finish block @@ -5413,12 +5626,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . res := make([]*ssa.Value, len(results)) for i, t := range results { off = types.Rnd(off, t.Alignment()) - if TypeOK(t) { - res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call) - } else { - addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call) - res[i] = s.rawLoad(t, addr) - } + res[i] = s.resultOfCall(call, int64(i), t) off += t.Size() } off = types.Rnd(off, int64(types.PtrSize)) @@ -5539,15 +5747,15 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { } } -// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param and value for the call. 
-func (s *state) putArg(n ir.Node, t *types.Type, off int64) (ssa.Param, *ssa.Value) { +// putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call. +func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value { var a *ssa.Value if !TypeOK(t) { a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem()) } else { a = s.expr(n) } - return ssa.Param{Type: t, Offset: int32(off)}, a + return a } func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) { @@ -6075,18 +6283,23 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if base.Debug.TypeAssert > 0 { base.WarnfAt(n.Pos(), "type assertion not inlined") } - if n.X.Type().IsEmptyInterface() { - if commaok { - call := s.rtcall(ir.Syms.AssertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) - return call[0], call[1] + if !commaok { + fn := ir.Syms.AssertI2I + if n.X.Type().IsEmptyInterface() { + fn = ir.Syms.AssertE2I } - return s.rtcall(ir.Syms.AssertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil + data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface) + tab := s.newValue1(ssa.OpITab, byteptr, iface) + tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0] + return s.newValue2(ssa.OpIMake, n.Type(), tab, data), nil } - if commaok { - call := s.rtcall(ir.Syms.AssertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface) - return call[0], call[1] + fn := ir.Syms.AssertI2I2 + if n.X.Type().IsEmptyInterface() { + fn = ir.Syms.AssertE2I2 } - return s.rtcall(ir.Syms.AssertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil + res = s.rtcall(fn, true, []*types.Type{n.Type()}, target, iface)[0] + resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(n.Type())) + return } if base.Debug.TypeAssert > 0 { @@ -6113,9 +6326,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if commaok && !TypeOK(n.Type()) { // unSSAable type, use temporary. // TODO: get rid of some of these temporaries. - tmp = typecheck.TempAt(n.Pos(), s.curfn, n.Type()) - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp.(*ir.Name), s.mem()) - addr = s.addr(tmp) + tmp, addr = s.temp(n.Pos(), n.Type()) } cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab) @@ -6197,6 +6408,14 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val return res, resok } +// temp allocates a temp of type t at position pos +func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) { + tmp := typecheck.TempAt(pos, s.curfn, t) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) + addr := s.addr(tmp) + return tmp, addr +} + // variable returns the value of a variable at the current location. func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value { v := s.vars[n] @@ -6243,21 +6462,12 @@ func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0} values, ok := s.f.NamedValues[loc] if !ok { - s.f.Names = append(s.f.Names, loc) + s.f.Names = append(s.f.Names, &loc) + s.f.CanonicalLocalSlots[loc] = &loc } s.f.NamedValues[loc] = append(values, v) } -// Generate a disconnected call to a runtime routine and a return. 
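The rewritten dottype path above routes assertions whose target is an interface type through AssertI2I/AssertE2I (panicking form) or their *2 variants (comma-ok form); assertions to concrete types keep the inlined itab comparison below. A runnable sketch of the two source-level forms:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	var e interface{} = strings.NewReader("hi")

	// Comma-ok form: never panics; compiles to the *2 runtime helpers
	// when the target is an interface type.
	r, ok := e.(io.Reader)
	fmt.Println(ok, r != nil) // true true

	// A failing comma-ok assertion yields the zero value and false;
	// the panicking form e.(io.Writer) would abort with an
	// interface-conversion error instead.
	if w, ok := e.(io.Writer); !ok {
		fmt.Println("not a writer", w) // not a writer <nil>
	}
}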
-func gencallret(pp *objw.Progs, sym *obj.LSym) *obj.Prog { - p := pp.Prog(obj.ACALL) - p.To.Type = obj.TYPE_MEM - p.To.Name = obj.NAME_EXTERN - p.To.Sym = sym - p = pp.Prog(obj.ARET) - return p -} - // Branch is an unresolved branch. type Branch struct { P *obj.Prog // branch instruction @@ -6277,15 +6487,16 @@ type State struct { // bstart remembers where each block starts (indexed by block ID) bstart []*obj.Prog - // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8. - ScratchFpMem *ir.Name - maxarg int64 // largest frame size for arguments to calls made by the function // Map from GC safe points to liveness index, generated by // liveness analysis. livenessMap liveness.Map + // partLiveArgs includes arguments that may be partially live, for which we + // need to generate instructions that spill the argument registers. + partLiveArgs map[*ir.Name]bool + // lineRunStart records the beginning of the current run of instructions // within a single block sharing the same line number // Used to move statement marks to the beginning of such runs. @@ -6295,6 +6506,10 @@ type State struct { OnWasmStackSkipped int } +func (s *State) FuncInfo() *obj.FuncInfo { + return s.pp.CurFunc.LSym.Func() +} + // Prog appends a new Prog. func (s *State) Prog(as obj.As) *obj.Prog { p := s.pp.Prog(as) @@ -6360,6 +6575,167 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) { } } +// emit argument info (locations on stack) for traceback. +func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) { + ft := e.curfn.Type() + if ft.NumRecvs() == 0 && ft.NumParams() == 0 { + return + } + + x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo()) + e.curfn.LSym.Func().ArgInfo = x + + // Emit a funcdata pointing at the arg info data. + p := pp.Prog(obj.AFUNCDATA) + p.From.SetConst(objabi.FUNCDATA_ArgInfo) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = x +} + +// emit argument info (locations on stack) of f for traceback. +func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { + x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI)) + + PtrSize := int64(types.PtrSize) + + isAggregate := func(t *types.Type) bool { + return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() + } + + // Populate the data. + // The data is a stream of bytes, which contains the offsets and sizes of the + // non-aggregate arguments or non-aggregate fields/elements of aggregate-typed + // arguments, along with special "operators". Specifically, + // - for each non-aggregate arg/field/element, its offset from FP (1 byte) and + // size (1 byte) + // - special operators: + // - 0xff - end of sequence + // - 0xfe - print { (at the start of an aggregate-typed argument) + // - 0xfd - print } (at the end of an aggregate-typed argument) + // - 0xfc - print ... (more args/fields/elements) + // - 0xfb - print _ (offset too large) + // These constants need to be in sync with runtime.traceback.go:printArgs. + const ( + _endSeq = 0xff + _startAgg = 0xfe + _endAgg = 0xfd + _dotdotdot = 0xfc + _offsetTooLarge = 0xfb + _special = 0xf0 // above this are operators, below this are ordinary offsets + ) + + const ( + limit = 10 // print no more than 10 args/components + maxDepth = 5 // no more than 5 layers of nesting + + // maxLen is a (conservative) upper bound of the byte stream length. For + // each arg/component, it has no more than 2 bytes of data (size, offset), + // and no more than one {, }, ...
at each level (it cannot have both the + data and ... unless it is the last one, just be conservative). Plus 1 + for _endSeq. + maxLen = (maxDepth*3+2)*limit + 1 + ) + + wOff := 0 + n := 0 + writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) } + + // Write one non-aggregate arg/field/element if there is room. + // Returns whether to continue. + write1 := func(sz, offset int64) bool { + if n >= limit { + return false + } + if offset >= _special { + writebyte(_offsetTooLarge) + } else { + writebyte(uint8(offset)) + writebyte(uint8(sz)) + } + n++ + return true + } + + // Visit t recursively and write it out. + // Returns whether to continue visiting. + var visitType func(baseOffset int64, t *types.Type, depth int) bool + visitType = func(baseOffset int64, t *types.Type, depth int) bool { + if n >= limit { + return false + } + if !isAggregate(t) { + return write1(t.Size(), baseOffset) + } + writebyte(_startAgg) + depth++ + if depth >= maxDepth { + writebyte(_dotdotdot) + writebyte(_endAgg) + n++ + return true + } + var r bool + switch { + case t.IsInterface(), t.IsString(): + r = write1(PtrSize, baseOffset) && + write1(PtrSize, baseOffset+PtrSize) + case t.IsSlice(): + r = write1(PtrSize, baseOffset) && + write1(PtrSize, baseOffset+PtrSize) && + write1(PtrSize, baseOffset+PtrSize*2) + case t.IsComplex(): + r = write1(t.Size()/2, baseOffset) && + write1(t.Size()/2, baseOffset+t.Size()/2) + case t.IsArray(): + r = true + if t.NumElem() == 0 { + n++ // {} counts as a component + break + } + for i := int64(0); i < t.NumElem(); i++ { + if !visitType(baseOffset, t.Elem(), depth) { + r = false + break + } + baseOffset += t.Elem().Size() + } + case t.IsStruct(): + r = true + if t.NumFields() == 0 { + n++ // {} counts as a component + break + } + for _, field := range t.Fields().Slice() { + if !visitType(baseOffset+field.Offset, field.Type, depth) { + r = false + break + } + } + } + if !r { + writebyte(_dotdotdot) + } + writebyte(_endAgg) + return r + } + + c := true + for _, a := range abiInfo.InParams() { + if !c { + writebyte(_dotdotdot) + break + } + c = visitType(a.FrameOffset(abiInfo), a.Type, 0) + } + writebyte(_endSeq) + if wOff > maxLen { + base.Fatalf("ArgInfo too large") + } + + return x +} +
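The byte stream EmitArgInfo produces is small enough to check by hand. A self-contained sketch that encodes one hypothetical argument, struct { p *int; n int } at frame offset 0 on a 64-bit target, using the operator values documented above (illustrative code, not the compiler's):

package main

import "fmt"

const (
	_endSeq   = 0xff
	_startAgg = 0xfe
	_endAgg   = 0xfd
)

func main() {
	var b []byte
	write1 := func(sz, off int64) {
		b = append(b, byte(off), byte(sz)) // offset from FP, then size
	}
	// struct { p *int; n int } at offset 0 encodes as "{" (0,8) (8,8) "}" end.
	b = append(b, _startAgg)
	write1(8, 0)
	write1(8, 8)
	b = append(b, _endAgg, _endSeq)
	fmt.Printf("% x\n", b) // fe 00 08 08 08 fd ff
}

At traceback time the runtime's printArgs walks this stream and renders the argument as {0x..., 0x...}.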
// genssa appends entries to pp for each instruction in f. func genssa(f *ssa.Func, pp *objw.Progs) { var s State @@ -6367,7 +6743,8 @@ func genssa(f *ssa.Func, pp *objw.Progs) { e := f.Frontend().(*ssafn) - s.livenessMap = liveness.Compute(e.curfn, f, e.stkptrsize, pp) + s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp) + emitArgInfo(e, f, pp) openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo if openDeferInfo != nil { @@ -6393,8 +6770,6 @@ func genssa(f *ssa.Func, pp *objw.Progs) { progToBlock[s.pp.Next] = f.Blocks[0] } - s.ScratchFpMem = e.scratchFpMem - if base.Ctxt.Flag_locationlists { if cap(f.Cache.ValueToProgAfter) < f.NumValues() { f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues()) @@ -6445,6 +6820,10 @@ func genssa(f *ssa.Func, pp *objw.Progs) { x := s.pp.Next s.DebugFriendlySetPosFrom(v) + if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) + } + switch v.Op { case ssa.OpInitMem: // memory arg needs no code @@ -6452,7 +6831,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // input args need no code case ssa.OpSP, ssa.OpSB: // nothing to do - case ssa.OpSelect0, ssa.OpSelect1: + case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult: // nothing to do case ssa.OpGetG: // nothing to do when there's a g register, @@ -6478,15 +6857,15 @@ func genssa(f *ssa.Func, pp *objw.Progs) { inlMarksByPos[pos] = append(inlMarksByPos[pos], p) default: + // Special case for first line in function; move it to the start (which cannot be a register-valued instruction) + if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg { + s.SetPos(firstPos) + firstPos = src.NoXPos + } // Attach this safe point to the next // instruction. s.pp.NextLive = s.livenessMap.Get(v) - // Special case for first line in function; move it to the start. - if firstPos != src.NoXPos { - s.SetPos(firstPos) - firstPos = src.NoXPos - } // let the backend handle it Arch.SSAGenValue(&s, v) } @@ -6543,7 +6922,20 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // deferreturn and a return. This will be used during panic // recovery to unwind the stack and return to the runtime. s.pp.NextLive = s.livenessMap.DeferReturn - gencallret(pp, ir.Syms.Deferreturn) + p := pp.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Deferreturn +
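The reload in the comment and code that follow matters because a deferred function can rewrite named results during recover, after they were stored to memory, and with a register ABI the caller reads results from registers. A runnable illustration:

package main

import "fmt"

func f() (n int) {
	defer func() {
		if recover() != nil {
			n = 42 // written to n's stack slot during recovery
		}
	}()
	panic("boom")
}

func main() {
	// f returns through deferreturn; the final value of n must be loaded
	// from memory into the result register before returning.
	fmt.Println(f()) // 42
}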
+ // Load results into registers. So when a deferred function + // recovers a panic, it will return to the caller with the right results. + // The results are already in memory, because they are not SSA'd + // when the function has defers (see canSSAName). + if f.OwnAux.ABIInfo().OutRegistersUsed() != 0 { + Arch.LoadRegResults(&s, f) + } + + pp.Prog(obj.ARET) } if inlMarks != nil { @@ -6592,9 +6984,18 @@ func genssa(f *ssa.Func, pp *objw.Progs) { } if base.Ctxt.Flag_locationlists { - debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset) + var debugInfo *ssa.FuncDebug + if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 { + debugInfo = ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset) + } else { + debugInfo = ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset) + } e.curfn.DebugInfo = debugInfo bstart := s.bstart + idToIdx := make([]int, f.NumBlocks()) + for i, b := range f.Blocks { + idToIdx[b.ID] = i + } // Note that at this moment, Prog.Pc is a sequence number; it's // not a real PC until after assembly, so this mapping has to // be done later. @@ -6607,6 +7008,10 @@ func genssa(f *ssa.Func, pp *objw.Progs) { } return bstart[b].Pc case ssa.BlockEnd.ID: + blk := f.Blocks[idToIdx[b]] + nv := len(blk.Values) + return valueToProgAfter[blk.Values[nv-1].ID].Pc + case ssa.FuncEnd.ID: return e.curfn.LSym.Size default: return valueToProgAfter[v].Pc @@ -6675,13 +7080,13 @@ func genssa(f *ssa.Func, pp *objw.Progs) { f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String()) } - defframe(&s, e) + defframe(&s, e, f) f.HTMLWriter.Close() f.HTMLWriter = nil } -func defframe(s *State, e *ssafn) { +func defframe(s *State, e *ssafn, f *ssa.Func) { pp := s.pp frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize)) @@ -6691,20 +7096,73 @@ func defframe(s *State, e *ssafn) { // Fill in argument and frame size. pp.Text.To.Type = obj.TYPE_TEXTSIZE - pp.Text.To.Val = int32(types.Rnd(e.curfn.Type().ArgWidth(), int64(types.RegSize))) + pp.Text.To.Val = int32(types.Rnd(f.OwnAux.ArgWidth(), int64(types.RegSize))) pp.Text.To.Offset = frame + p := pp.Text + + // Insert code to spill argument registers if the named slot may be partially + // live. That is, the named slot is considered live by liveness analysis + // (because a part of it is live), but we may not spill all parts into the + // slot. This can only happen with aggregate-typed arguments that are SSA-able + // and not address-taken (for non-SSA-able or address-taken arguments we always + // spill upfront). + // Note: spilling is unnecessary in the -N/no-optimize case, since all values + // will be considered non-SSAable and spilled up front. + // TODO(register args) Make liveness more fine-grained so that partial spilling is okay. + if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 { + // First, see if it is already spilled before it may be live. Look for a spill + // in the entry block up to the first safepoint. + type nameOff struct { + n *ir.Name + off int64 + } + partLiveArgsSpilled := make(map[nameOff]bool) + for _, v := range f.Entry.Values { + if v.Op.IsCall() { + break + } + if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg { + continue + } + n, off := ssa.AutoVar(v) + if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] { + continue + } + partLiveArgsSpilled[nameOff{n, off}] = true + } + + // Then, insert code to spill registers if not already.
+ for _, a := range f.OwnAux.ABIInfo().InParams() { + n, ok := a.Name.(*ir.Name) + if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 { + continue + } + rts, offs := a.RegisterTypesAndOffsets() + for i := range a.Registers { + if !rts[i].HasPointers() { + continue + } + if partLiveArgsSpilled[nameOff{n, offs[i]}] { + continue // already spilled + } + reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config) + p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i]) + } + } + } + // Insert code to zero ambiguously live variables so that the // garbage collector only sees initialized values when it // looks for pointers. - p := pp.Text var lo, hi int64 // Opaque state for backend to use. Current backends use it to // keep track of which helper registers have been zeroed. var state uint32 - // Iterate through declarations. They are sorted in decreasing Xoffset order. + // Iterate through declarations. Autos are sorted in decreasing + // frame offset order. for _, n := range e.curfn.Dcl { if !n.Needzero() { continue @@ -6794,14 +7252,18 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { a.Name = obj.NAME_EXTERN a.Sym = n case *ir.Name: - if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { + if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) { a.Name = obj.NAME_PARAM a.Sym = ir.Orig(n).(*ir.Name).Linksym() a.Offset += n.FrameOffset() break } a.Name = obj.NAME_AUTO - a.Sym = n.Linksym() + if n.Class == ir.PPARAMOUT { + a.Sym = ir.Orig(n).(*ir.Name).Linksym() + } else { + a.Sym = n.Linksym() + } a.Offset += n.FrameOffset() default: v.Fatalf("aux in %s not implemented %#v", v, v.Aux) @@ -6920,14 +7382,34 @@ func CheckLoweredPhi(v *ssa.Value) { } } -// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block. +// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block, +// except for incoming in-register arguments. // The output of LoweredGetClosurePtr is generally hardwired to the correct register. // That register contains the closure pointer on closure entry. func CheckLoweredGetClosurePtr(v *ssa.Value) { entry := v.Block.Func.Entry - if entry != v.Block || entry.Values[0] != v { + if entry != v.Block { base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) } + for _, w := range entry.Values { + if w == v { + break + } + switch w.Op { + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // okay + default: + base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) + } + } +} + +// CheckArgReg ensures that v is in the function's entry block. 
+func CheckArgReg(v *ssa.Value) { + entry := v.Block.Func.Entry + if entry != v.Block { + base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v) + } } func AddrAuto(a *obj.Addr, v *ssa.Value) { @@ -6936,24 +7418,13 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) { a.Sym = n.Linksym() a.Reg = int16(Arch.REGSP) a.Offset = n.FrameOffset() + off - if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { + if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) { a.Name = obj.NAME_PARAM } else { a.Name = obj.NAME_AUTO } } -func (s *State) AddrScratch(a *obj.Addr) { - if s.ScratchFpMem == nil { - panic("no scratch memory available; forgot to declare usesScratch for Op?") - } - a.Type = obj.TYPE_MEM - a.Name = obj.NAME_AUTO - a.Sym = s.ScratchFpMem.Linksym() - a.Reg = int16(Arch.REGSP) - a.Offset = s.ScratchFpMem.Offset_ -} - // Call returns a new CALL instruction for the SSA value v. // It uses PrepareCall to prepare the call. func (s *State) Call(v *ssa.Value) *obj.Prog { @@ -7056,12 +7527,11 @@ func fieldIdx(n *ir.SelectorExpr) int { // ssafn holds frontend information about a function that the backend is processing. // It also exports a bunch of compiler services for the ssa backend. type ssafn struct { - curfn *ir.Func - strings map[string]*obj.LSym // map from constant string to data symbols - scratchFpMem *ir.Name // temp for floating point register / memory moves on some architectures - stksize int64 // stack size for current frame - stkptrsize int64 // prefix of stack containing pointers - log bool // print ssa debug to the stdout + curfn *ir.Func + strings map[string]*obj.LSym // map from constant string to data symbols + stksize int64 // stack size for current frame + stkptrsize int64 // prefix of stack containing pointers + log bool // print ssa debug to the stdout } // StringData returns a symbol which @@ -7082,82 +7552,6 @@ func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name { return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list } -func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { - ptrType := types.NewPtr(types.Types[types.TUINT8]) - lenType := types.Types[types.TINT] - // Split this string up into two separate variables. - p := e.SplitSlot(&name, ".ptr", 0, ptrType) - l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType) - return p, l -} - -func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { - n := name.N - u := types.Types[types.TUINTPTR] - t := types.NewPtr(types.Types[types.TUINT8]) - // Split this interface up into two separate variables. 
- f := ".itab" - if n.Type().IsEmptyInterface() { - f = ".type" - } - c := e.SplitSlot(&name, f, 0, u) // see comment in typebits.Set - d := e.SplitSlot(&name, ".data", u.Size(), t) - return c, d -} - -func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { - ptrType := types.NewPtr(name.Type.Elem()) - lenType := types.Types[types.TINT] - p := e.SplitSlot(&name, ".ptr", 0, ptrType) - l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType) - c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType) - return p, l, c -} - -func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { - s := name.Type.Size() / 2 - var t *types.Type - if s == 8 { - t = types.Types[types.TFLOAT64] - } else { - t = types.Types[types.TFLOAT32] - } - r := e.SplitSlot(&name, ".real", 0, t) - i := e.SplitSlot(&name, ".imag", t.Size(), t) - return r, i -} - -func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { - var t *types.Type - if name.Type.IsSigned() { - t = types.Types[types.TINT32] - } else { - t = types.Types[types.TUINT32] - } - if Arch.LinkArch.ByteOrder == binary.BigEndian { - return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32]) - } - return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32]) -} - -func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { - st := name.Type - // Note: the _ field may appear several times. But - // have no fear, identically-named but distinct Autos are - // ok, albeit maybe confusing for a debugger. - return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i)) -} - -func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { - n := name.N - at := name.Type - if at.NumElem() != 1 { - e.Fatalf(n.Pos(), "bad array size") - } - et := at.Elem() - return e.SplitSlot(&name, "[0]", 0, et) -} - func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { return reflectdata.ITabSym(it, offset) } @@ -7265,44 +7659,17 @@ func clobberBase(n ir.Node) ir.Node { return n } -// callTargetLSym determines the correct LSym for 'callee' when called -// from function 'caller'. There are a couple of different scenarios -// to contend with here: -// -// 1. if 'caller' is an ABI wrapper, then we always want to use the -// LSym from the Func for the callee. -// -// 2. if 'caller' is not an ABI wrapper, then we looked at the callee -// to see if it corresponds to a "known" ABI0 symbol (e.g. assembly -// routine defined in the current package); if so, we want the call to -// directly target the ABI0 symbol (effectively bypassing the -// ABIInternal->ABI0 wrapper for 'callee'). -// -// 3. in all other cases, want the regular ABIInternal linksym -// -func callTargetLSym(callee *ir.Name, callerLSym *obj.LSym) *obj.LSym { - lsym := callee.Linksym() - if !base.Flag.ABIWrap { - return lsym - } - fn := callee.Func - if fn == nil { - return lsym +// callTargetLSym returns the correct LSym to call 'callee' using its ABI. +func callTargetLSym(callee *ir.Name) *obj.LSym { + if callee.Func == nil { + // TODO(austin): This happens in a few cases of + // compiler-generated functions. These are all + // ABIInternal. It would be better if callee.Func was + // never nil and we didn't need this case. 
+ return callee.Linksym() } - // check for case 1 above - if callerLSym.ABIWrapper() { - if nlsym := fn.LSym; nlsym != nil { - lsym = nlsym - } - } else { - // check for case 2 above - defABI, hasDefABI := symabiDefs[lsym.Name] - if hasDefABI && defABI == obj.ABI0 { - lsym = callee.LinksymABI(obj.ABI0) - } - } - return lsym + return callee.LinksymABI(callee.Func.ABI) } func min8(a, b int8) int8 { @@ -7360,6 +7727,19 @@ func deferstruct(stksize int64) *types.Type { return s } +// SpillSlotAddr uses LocalSlot information to initialize an obj.Addr. +// The resulting addr is used in a non-standard context -- in the prologue +// of a function, before the frame has been constructed, so the standard +// addressing for the parameters will be wrong. +func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr { + return obj.Addr{ + Name: obj.NAME_NONE, + Type: obj.TYPE_MEM, + Reg: baseReg, + Offset: spill.Offset + extraOffset, + } +} + var ( BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go index b06fd7aa4b2..abb0bba646e 100644 --- a/src/cmd/compile/internal/staticdata/data.go +++ b/src/cmd/compile/internal/staticdata/data.go @@ -8,6 +8,7 @@ import ( "crypto/sha256" "fmt" "go/constant" + "internal/buildcfg" "io" "io/ioutil" "os" @@ -214,11 +215,16 @@ func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int var ( funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) - funcsyms []*types.Sym + funcsyms []*ir.Name // functions that need function value symbols ) -// FuncSym returns s·f. -func FuncSym(s *types.Sym) *types.Sym { +// FuncLinksym returns n·f, the function value symbol for n. +func FuncLinksym(n *ir.Name) *obj.LSym { + if n.Op() != ir.ONAME || n.Class != ir.PFUNC { + base.Fatalf("expected func name: %v", n) + } + s := n.Sym() + // funcsymsmu here serves to protect not just mutations of funcsyms (below), // but also the package lookup of the func sym name, // since this function gets called concurrently from the backend. @@ -235,17 +241,11 @@ func FuncSym(s *types.Sym) *types.Sym { // symbols will be created explicitly with NeedFuncSym. // See the NeedFuncSym comment for details. if !base.Ctxt.Flag_dynlink && !existed { - funcsyms = append(funcsyms, s) + funcsyms = append(funcsyms, n) } funcsymsmu.Unlock() - return sf -} -func FuncLinksym(n *ir.Name) *obj.LSym { - if n.Op() != ir.ONAME || n.Class != ir.PFUNC { - base.Fatalf("expected func name: %v", n) - } - return FuncSym(n.Sym()).Linksym() + return sf.Linksym() } func GlobalLinksym(n *ir.Name) *obj.LSym { @@ -255,43 +255,62 @@ func GlobalLinksym(n *ir.Name) *obj.LSym { return n.Linksym() } -// NeedFuncSym ensures that s·f is exported, if needed. +// NeedFuncSym ensures that fn·f is exported, if needed. // It is only used with -dynlink. // When not compiling for dynamic linking, // the funcsyms are created as needed by // the packages that use them. -// Normally we emit the s·f stubs as DUPOK syms, +// Normally we emit the fn·f stubs as DUPOK syms, // but DUPOK doesn't work across shared library boundaries. // So instead, when dynamic linking, we only create -// the s·f stubs in s's package. -func NeedFuncSym(s *types.Sym) { +// the fn·f stubs in fn's package. +func NeedFuncSym(fn *ir.Func) { if base.Ctxt.InParallel { // The append below probably just needs to lock // funcsymsmu, like in FuncLinksym.
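// (A summary of the invariants the checks below enforce: NeedFuncSym
// must run serially; when regabi wrappers are enabled, only ABIInternal
// functions may get a funcsym, since WriteFuncSyms emits fn·f as a
// pointer to the ABIInternal entry point; and blank functions never get
// one, because they are not unique.)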
base.Fatalf("NeedFuncSym must be called in serial") } + if fn.ABI != obj.ABIInternal && buildcfg.Experiment.RegabiWrappers { + // Function values must always reference ABIInternal + // entry points, so it doesn't make sense to create a + // funcsym for other ABIs. + // + // (If we're using ABI aliases, it doesn't matter.) + base.Fatalf("expected ABIInternal: %v has %v", fn.Nname, fn.ABI) + } + if ir.IsBlank(fn.Nname) { + // Blank functions aren't unique, so we can't make a + // funcsym for them. + base.Fatalf("NeedFuncSym called for _") + } if !base.Ctxt.Flag_dynlink { return } - if s.IsBlank() { + s := fn.Nname.Sym() + if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") || + (base.Ctxt.Pkgpath == "internal/abi" && (s.Name == "FuncPCABI0" || s.Name == "FuncPCABIInternal")) { + // runtime.getg(), getclosureptr(), getcallerpc(), getcallersp(), + // and internal/abi.FuncPCABIxxx() are not real functions and so + // do not get funcsyms. return } - if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") { - // runtime.getg(), getclosureptr(), getcallerpc(), and - // getcallersp() are not real functions and so do not - // get funcsyms. - return - } - funcsyms = append(funcsyms, s) + funcsyms = append(funcsyms, fn.Nname) } func WriteFuncSyms() { sort.Slice(funcsyms, func(i, j int) bool { return funcsyms[i].Linksym().Name < funcsyms[j].Linksym().Name }) - for _, s := range funcsyms { + for _, nam := range funcsyms { + s := nam.Sym() sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym() - objw.SymPtr(sf, 0, s.Linksym(), 0) + // Function values must always reference ABIInternal + // entry points. + target := s.Linksym() + if target.ABI() != obj.ABIInternal { + base.Fatalf("expected ABIInternal: %v has %v", target, target.ABI()) + } + objw.SymPtr(sf, 0, target, 0) objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA) } } diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go index f3ad82e7b60..0c97b6de747 100644 --- a/src/cmd/compile/internal/staticinit/sched.go +++ b/src/cmd/compile/internal/staticinit/sched.go @@ -459,7 +459,6 @@ func StaticName(t *types.Type) *ir.Name { statuniqgen++ typecheck.Declare(n, ir.PEXTERN) n.SetType(t) - n.Linksym().Set(obj.AttrLocal, true) return n } diff --git a/src/cmd/compile/internal/syntax/error_test.go b/src/cmd/compile/internal/syntax/error_test.go index 919667f1d31..e4bedf54fdc 100644 --- a/src/cmd/compile/internal/syntax/error_test.go +++ b/src/cmd/compile/internal/syntax/error_test.go @@ -164,7 +164,7 @@ func testSyntaxErrors(t *testing.T, filename string) { // we have a match - eliminate this error delete(declared, pos) } else { - t.Errorf("%s: unexpected error: %s", orig, e.Msg) + t.Errorf("%s:%s: unexpected error: %s", filename, orig, e.Msg) } }, nil, mode) @@ -175,7 +175,7 @@ func testSyntaxErrors(t *testing.T, filename string) { // report expected but not reported errors for pos, pattern := range declared { - t.Errorf("%s: missing error: %s", pos, pattern) + t.Errorf("%s:%s: missing error: %s", filename, pos, pattern) } } diff --git a/src/cmd/compile/internal/syntax/operator_string.go b/src/cmd/compile/internal/syntax/operator_string.go index a7cd40fb135..f045d8c5524 100644 --- a/src/cmd/compile/internal/syntax/operator_string.go +++ b/src/cmd/compile/internal/syntax/operator_string.go @@ -11,30 +11,31 @@ func _() { _ = x[Def-1] _ = x[Not-2] _ = 
x[Recv-3] - _ = x[OrOr-4] - _ = x[AndAnd-5] - _ = x[Eql-6] - _ = x[Neq-7] - _ = x[Lss-8] - _ = x[Leq-9] - _ = x[Gtr-10] - _ = x[Geq-11] - _ = x[Add-12] - _ = x[Sub-13] - _ = x[Or-14] - _ = x[Xor-15] - _ = x[Mul-16] - _ = x[Div-17] - _ = x[Rem-18] - _ = x[And-19] - _ = x[AndNot-20] - _ = x[Shl-21] - _ = x[Shr-22] + _ = x[Tilde-4] + _ = x[OrOr-5] + _ = x[AndAnd-6] + _ = x[Eql-7] + _ = x[Neq-8] + _ = x[Lss-9] + _ = x[Leq-10] + _ = x[Gtr-11] + _ = x[Geq-12] + _ = x[Add-13] + _ = x[Sub-14] + _ = x[Or-15] + _ = x[Xor-16] + _ = x[Mul-17] + _ = x[Div-18] + _ = x[Rem-19] + _ = x[And-20] + _ = x[AndNot-21] + _ = x[Shl-22] + _ = x[Shr-23] } -const _Operator_name = ":!<-||&&==!=<<=>>=+-|^*/%&&^<<>>" +const _Operator_name = ":!<-~||&&==!=<<=>>=+-|^*/%&&^<<>>" -var _Operator_index = [...]uint8{0, 1, 2, 4, 6, 8, 10, 12, 13, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 30, 32} +var _Operator_index = [...]uint8{0, 1, 2, 4, 5, 7, 9, 11, 13, 14, 16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31, 33} func (i Operator) String() string { i -= 1 diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go index c4ccbb82cb5..e7b8840b337 100644 --- a/src/cmd/compile/internal/syntax/parser.go +++ b/src/cmd/compile/internal/syntax/parser.go @@ -735,9 +735,9 @@ func (p *parser) binaryExpr(prec int) Expr { t := new(Operation) t.pos = p.pos() t.Op = p.op - t.X = x tprec := p.prec p.next() + t.X = x t.Y = p.binaryExpr(tprec) x = t } @@ -1291,16 +1291,15 @@ func (p *parser) typeInstance(typ Expr) Expr { pos := p.pos() p.want(_Lbrack) - if p.tok == _Rbrack { - p.error("expecting type") - p.next() - return typ - } - x := new(IndexExpr) x.pos = pos x.X = typ - x.Index, _ = p.typeList() + if p.tok == _Rbrack { + p.syntaxError("expecting type") + x.Index = p.badExpr() + } else { + x.Index, _ = p.typeList() + } p.want(_Rbrack) return x } @@ -1381,7 +1380,9 @@ func (p *parser) structType() *StructType { return typ } -// InterfaceType = "interface" "{" { MethodSpec ";" } "}" . +// InterfaceType = "interface" "{" { ( MethodDecl | EmbeddedElem | TypeList ) ";" } "}" . +// TypeList = "type" Type { "," Type } . +// TODO(gri) remove TypeList syntax if we accept #45346 func (p *parser) interfaceType() *InterfaceType { if trace { defer p.trace("interfaceType")() @@ -1395,9 +1396,15 @@ func (p *parser) interfaceType() *InterfaceType { p.list(_Semi, _Rbrace, func() bool { switch p.tok { case _Name: - typ.MethodList = append(typ.MethodList, p.methodDecl()) + f := p.methodDecl() + if f.Name == nil && p.mode&AllowGenerics != 0 { + f = p.embeddedElem(f) + } + typ.MethodList = append(typ.MethodList, f) + return false case _Lparen: + // TODO(gri) Need to decide how to adjust this restriction. 
p.syntaxError("cannot parenthesize embedded type") f := new(Field) f.pos = p.pos() @@ -1405,10 +1412,17 @@ func (p *parser) interfaceType() *InterfaceType { f.Type = p.qualifiedName(nil) p.want(_Rparen) typ.MethodList = append(typ.MethodList, f) + return false + + case _Operator: + if p.op == Tilde && p.mode&AllowGenerics != 0 { + typ.MethodList = append(typ.MethodList, p.embeddedElem(nil)) + return false + } case _Type: + // TODO(gri) remove TypeList syntax if we accept #45346 if p.mode&AllowGenerics != 0 { - // TODO(gri) factor this better type_ := NewName(p.pos(), "type") // cannot have a method named "type" p.next() if p.tok != _Semi && p.tok != _Rbrace { @@ -1427,19 +1441,18 @@ func (p *parser) interfaceType() *InterfaceType { } else { p.syntaxError("expecting type") } - break - } - fallthrough - - default: - if p.mode&AllowGenerics != 0 { - p.syntaxError("expecting method, interface name, or type list") - p.advance(_Semi, _Rbrace, _Type) - } else { - p.syntaxError("expecting method or interface name") - p.advance(_Semi, _Rbrace) + return false } } + + if p.mode&AllowGenerics != 0 { + p.syntaxError("expecting method, type list, or embedded element") + p.advance(_Semi, _Rbrace, _Type) // TODO(gri) remove _Type if we don't accept it anymore + return false + } + + p.syntaxError("expecting method or interface name") + p.advance(_Semi, _Rbrace) return false }) @@ -1732,6 +1745,56 @@ func (p *parser) methodDecl() *Field { return f } +// EmbeddedElem = MethodSpec | EmbeddedTerm { "|" EmbeddedTerm } . +func (p *parser) embeddedElem(f *Field) *Field { + if trace { + defer p.trace("embeddedElem")() + } + + if f == nil { + f = new(Field) + f.pos = p.pos() + f.Type = p.embeddedTerm() + } + + for p.tok == _Operator && p.op == Or { + t := new(Operation) + t.pos = p.pos() + t.Op = Or + p.next() + t.X = f.Type + t.Y = p.embeddedTerm() + f.Type = t + } + + return f +} + +// EmbeddedTerm = [ "~" ] Type . +func (p *parser) embeddedTerm() Expr { + if trace { + defer p.trace("embeddedTerm")() + } + + if p.tok == _Operator && p.op == Tilde { + t := new(Operation) + t.pos = p.pos() + t.Op = Tilde + p.next() + t.X = p.type_() + return t + } + + t := p.typeOrNil() + if t == nil { + t = p.badExpr() + p.syntaxError("expecting ~ term or type") + p.advance(_Operator, _Semi, _Rparen, _Rbrack, _Rbrace) + } + + return t +} + // ParameterDecl = [ IdentifierList ] [ "..." ] Type . func (p *parser) paramDeclOrNil(name *Name) *Field { if trace { @@ -1772,7 +1835,7 @@ func (p *parser) paramDeclOrNil(name *Name) *Field { t.Elem = p.typeOrNil() if t.Elem == nil { t.Elem = p.badExpr() - p.syntaxError("final argument in variadic function missing type") + p.syntaxError("... is missing type") } f.Type = t return f diff --git a/src/cmd/compile/internal/syntax/pos.go b/src/cmd/compile/internal/syntax/pos.go index 99734d42d80..baebcc995c7 100644 --- a/src/cmd/compile/internal/syntax/pos.go +++ b/src/cmd/compile/internal/syntax/pos.go @@ -59,6 +59,45 @@ func (pos Pos) RelCol() uint { return pos.Col() } +// Cmp compares the positions p and q and returns a result r as follows: +// +// r < 0: p is before q +// r == 0: p and q are the same position (but may not be identical) +// r > 0: p is after q +// +// If p and q are in different files, p is before q if the filename +// of p sorts lexicographically before the filename of q. 
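// A usage sketch, with hypothetical positions in the same file:
//
//	p := MakePos(base, 3, 5)
//	q := MakePos(base, 3, 9)
//	p.Cmp(q) // < 0: same file and line, column 5 precedes column 9
//	q.Cmp(p) // > 0
//	p.Cmp(p) // == 0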
+func (p Pos) Cmp(q Pos) int { + pname := p.RelFilename() + qname := q.RelFilename() + switch { + case pname < qname: + return -1 + case pname > qname: + return +1 + } + + pline := p.Line() + qline := q.Line() + switch { + case pline < qline: + return -1 + case pline > qline: + return +1 + } + + pcol := p.Col() + qcol := q.Col() + switch { + case pcol < qcol: + return -1 + case pcol > qcol: + return +1 + } + + return 0 +} + func (pos Pos) String() string { rel := position_{pos.RelFilename(), pos.RelLine(), pos.RelCol()} abs := position_{pos.Base().Pos().RelFilename(), pos.Line(), pos.Col()} diff --git a/src/cmd/compile/internal/types2/pos.go b/src/cmd/compile/internal/syntax/positions.go similarity index 52% rename from src/cmd/compile/internal/types2/pos.go rename to src/cmd/compile/internal/syntax/positions.go index 955bb2ad080..b00f86c67cd 100644 --- a/src/cmd/compile/internal/types2/pos.go +++ b/src/cmd/compile/internal/syntax/positions.go @@ -1,16 +1,13 @@ -// UNREVIEWED -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file implements helper functions for scope position computations. -package types2 +package syntax -import "cmd/compile/internal/syntax" - -// startPos returns the start position of n. -func startPos(n syntax.Node) syntax.Pos { +// StartPos returns the start position of n. +func StartPos(n Node) Pos { // Cases for nodes which don't need a correction are commented out. for m := n; ; { switch n := m.(type) { @@ -18,95 +15,95 @@ func startPos(n syntax.Node) syntax.Pos { panic("internal error: nil") // packages - case *syntax.File: + case *File: // file block starts at the beginning of the file - return syntax.MakePos(n.Pos().Base(), 1, 1) + return MakePos(n.Pos().Base(), 1, 1) // declarations - // case *syntax.ImportDecl: - // case *syntax.ConstDecl: - // case *syntax.TypeDecl: - // case *syntax.VarDecl: - // case *syntax.FuncDecl: + // case *ImportDecl: + // case *ConstDecl: + // case *TypeDecl: + // case *VarDecl: + // case *FuncDecl: // expressions - // case *syntax.BadExpr: - // case *syntax.Name: - // case *syntax.BasicLit: - case *syntax.CompositeLit: + // case *BadExpr: + // case *Name: + // case *BasicLit: + case *CompositeLit: if n.Type != nil { m = n.Type continue } return n.Pos() - // case *syntax.KeyValueExpr: - // case *syntax.FuncLit: - // case *syntax.ParenExpr: - case *syntax.SelectorExpr: + // case *KeyValueExpr: + // case *FuncLit: + // case *ParenExpr: + case *SelectorExpr: m = n.X - case *syntax.IndexExpr: + case *IndexExpr: m = n.X - // case *syntax.SliceExpr: - case *syntax.AssertExpr: + // case *SliceExpr: + case *AssertExpr: m = n.X - case *syntax.TypeSwitchGuard: + case *TypeSwitchGuard: if n.Lhs != nil { m = n.Lhs continue } m = n.X - case *syntax.Operation: + case *Operation: if n.Y != nil { m = n.X continue } return n.Pos() - case *syntax.CallExpr: + case *CallExpr: m = n.Fun - case *syntax.ListExpr: + case *ListExpr: if len(n.ElemList) > 0 { m = n.ElemList[0] continue } return n.Pos() // types - // case *syntax.ArrayType: - // case *syntax.SliceType: - // case *syntax.DotsType: - // case *syntax.StructType: - // case *syntax.Field: - // case *syntax.InterfaceType: - // case *syntax.FuncType: - // case *syntax.MapType: - // case *syntax.ChanType: + // case *ArrayType: + // case *SliceType: + // case *DotsType: + // case *StructType: + // case *Field: + // case 
*InterfaceType: + // case *FuncType: + // case *MapType: + // case *ChanType: // statements - // case *syntax.EmptyStmt: - // case *syntax.LabeledStmt: - // case *syntax.BlockStmt: - // case *syntax.ExprStmt: - case *syntax.SendStmt: + // case *EmptyStmt: + // case *LabeledStmt: + // case *BlockStmt: + // case *ExprStmt: + case *SendStmt: m = n.Chan - // case *syntax.DeclStmt: - case *syntax.AssignStmt: + // case *DeclStmt: + case *AssignStmt: m = n.Lhs - // case *syntax.BranchStmt: - // case *syntax.CallStmt: - // case *syntax.ReturnStmt: - // case *syntax.IfStmt: - // case *syntax.ForStmt: - // case *syntax.SwitchStmt: - // case *syntax.SelectStmt: + // case *BranchStmt: + // case *CallStmt: + // case *ReturnStmt: + // case *IfStmt: + // case *ForStmt: + // case *SwitchStmt: + // case *SelectStmt: // helper nodes - case *syntax.RangeClause: + case *RangeClause: if n.Lhs != nil { m = n.Lhs continue } m = n.X - // case *syntax.CaseClause: - // case *syntax.CommClause: + // case *CaseClause: + // case *CommClause: default: return n.Pos() @@ -114,30 +111,29 @@ func startPos(n syntax.Node) syntax.Pos { } } -// endPos returns the approximate end position of n in the source. -// For some nodes (*syntax.Name, *syntax.BasicLit) it returns -// the position immediately following the node; for others -// (*syntax.BlockStmt, *syntax.SwitchStmt, etc.) it returns -// the position of the closing '}'; and for some (*syntax.ParenExpr) +// EndPos returns the approximate end position of n in the source. +// For some nodes (*Name, *BasicLit) it returns the position immediately +// following the node; for others (*BlockStmt, *SwitchStmt, etc.) it +// returns the position of the closing '}'; and for some (*ParenExpr) // the returned position is the end position of the last enclosed // expression. -// Thus, endPos should not be used for exact demarcation of the +// Thus, EndPos should not be used for exact demarcation of the // end of a node in the source; it is mostly useful to determine // scope ranges where there is some leeway. 
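// A minimal sketch of how the two helpers combine (spanOf is a
// hypothetical caller, not part of this change):
//
//	// spanOf returns the approximate source extent of n, which is
//	// enough for computing scope ranges.
//	func spanOf(n Node) (start, end Pos) {
//		return StartPos(n), EndPos(n)
//	}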
-func endPos(n syntax.Node) syntax.Pos { +func EndPos(n Node) Pos { for m := n; ; { switch n := m.(type) { case nil: panic("internal error: nil") // packages - case *syntax.File: + case *File: return n.EOF // declarations - case *syntax.ImportDecl: + case *ImportDecl: m = n.Path - case *syntax.ConstDecl: + case *ConstDecl: if n.Values != nil { m = n.Values continue @@ -151,9 +147,9 @@ func endPos(n syntax.Node) syntax.Pos { continue } return n.Pos() - case *syntax.TypeDecl: + case *TypeDecl: m = n.Type - case *syntax.VarDecl: + case *VarDecl: if n.Values != nil { m = n.Values continue @@ -167,7 +163,7 @@ func endPos(n syntax.Node) syntax.Pos { continue } return n.Pos() - case *syntax.FuncDecl: + case *FuncDecl: if n.Body != nil { m = n.Body continue @@ -175,27 +171,27 @@ func endPos(n syntax.Node) syntax.Pos { m = n.Type // expressions - case *syntax.BadExpr: + case *BadExpr: return n.Pos() - case *syntax.Name: + case *Name: p := n.Pos() - return syntax.MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value))) - case *syntax.BasicLit: + return MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value))) + case *BasicLit: p := n.Pos() - return syntax.MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value))) - case *syntax.CompositeLit: + return MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value))) + case *CompositeLit: return n.Rbrace - case *syntax.KeyValueExpr: + case *KeyValueExpr: m = n.Value - case *syntax.FuncLit: + case *FuncLit: m = n.Body - case *syntax.ParenExpr: + case *ParenExpr: m = n.X - case *syntax.SelectorExpr: + case *SelectorExpr: m = n.Sel - case *syntax.IndexExpr: + case *IndexExpr: m = n.Index - case *syntax.SliceExpr: + case *SliceExpr: for i := len(n.Index) - 1; i >= 0; i-- { if x := n.Index[i]; x != nil { m = x @@ -203,23 +199,23 @@ func endPos(n syntax.Node) syntax.Pos { } } m = n.X - case *syntax.AssertExpr: + case *AssertExpr: m = n.Type - case *syntax.TypeSwitchGuard: + case *TypeSwitchGuard: m = n.X - case *syntax.Operation: + case *Operation: if n.Y != nil { m = n.Y continue } m = n.X - case *syntax.CallExpr: + case *CallExpr: if l := lastExpr(n.ArgList); l != nil { m = l continue } m = n.Fun - case *syntax.ListExpr: + case *ListExpr: if l := lastExpr(n.ElemList); l != nil { m = l continue @@ -227,32 +223,32 @@ func endPos(n syntax.Node) syntax.Pos { return n.Pos() // types - case *syntax.ArrayType: + case *ArrayType: m = n.Elem - case *syntax.SliceType: + case *SliceType: m = n.Elem - case *syntax.DotsType: + case *DotsType: m = n.Elem - case *syntax.StructType: + case *StructType: if l := lastField(n.FieldList); l != nil { m = l continue } return n.Pos() // TODO(gri) need to take TagList into account - case *syntax.Field: + case *Field: if n.Type != nil { m = n.Type continue } m = n.Name - case *syntax.InterfaceType: + case *InterfaceType: if l := lastField(n.MethodList); l != nil { m = l continue } return n.Pos() - case *syntax.FuncType: + case *FuncType: if l := lastField(n.ResultList); l != nil { m = l continue @@ -262,71 +258,71 @@ func endPos(n syntax.Node) syntax.Pos { continue } return n.Pos() - case *syntax.MapType: + case *MapType: m = n.Value - case *syntax.ChanType: + case *ChanType: m = n.Elem // statements - case *syntax.EmptyStmt: + case *EmptyStmt: return n.Pos() - case *syntax.LabeledStmt: + case *LabeledStmt: m = n.Stmt - case *syntax.BlockStmt: + case *BlockStmt: return n.Rbrace - case *syntax.ExprStmt: + case *ExprStmt: m = n.X - case *syntax.SendStmt: + case *SendStmt: m = n.Value - case *syntax.DeclStmt: + case *DeclStmt: if l := 
lastDecl(n.DeclList); l != nil { m = l continue } return n.Pos() - case *syntax.AssignStmt: + case *AssignStmt: m = n.Rhs if m == nil { - p := endPos(n.Lhs) - return syntax.MakePos(p.Base(), p.Line(), p.Col()+2) + p := EndPos(n.Lhs) + return MakePos(p.Base(), p.Line(), p.Col()+2) } - case *syntax.BranchStmt: + case *BranchStmt: if n.Label != nil { m = n.Label continue } return n.Pos() - case *syntax.CallStmt: + case *CallStmt: m = n.Call - case *syntax.ReturnStmt: + case *ReturnStmt: if n.Results != nil { m = n.Results continue } return n.Pos() - case *syntax.IfStmt: + case *IfStmt: if n.Else != nil { m = n.Else continue } m = n.Then - case *syntax.ForStmt: + case *ForStmt: m = n.Body - case *syntax.SwitchStmt: + case *SwitchStmt: return n.Rbrace - case *syntax.SelectStmt: + case *SelectStmt: return n.Rbrace // helper nodes - case *syntax.RangeClause: + case *RangeClause: m = n.X - case *syntax.CaseClause: + case *CaseClause: if l := lastStmt(n.Body); l != nil { m = l continue } return n.Colon - case *syntax.CommClause: + case *CommClause: if l := lastStmt(n.Body); l != nil { m = l continue @@ -339,28 +335,28 @@ func endPos(n syntax.Node) syntax.Pos { } } -func lastDecl(list []syntax.Decl) syntax.Decl { +func lastDecl(list []Decl) Decl { if l := len(list); l > 0 { return list[l-1] } return nil } -func lastExpr(list []syntax.Expr) syntax.Expr { +func lastExpr(list []Expr) Expr { if l := len(list); l > 0 { return list[l-1] } return nil } -func lastStmt(list []syntax.Stmt) syntax.Stmt { +func lastStmt(list []Stmt) Stmt { if l := len(list); l > 0 { return list[l-1] } return nil } -func lastField(list []*syntax.Field) *syntax.Field { +func lastField(list []*Field) *Field { if l := len(list); l > 0 { return list[l-1] } diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go index 9109ce2363c..e557f5d9247 100644 --- a/src/cmd/compile/internal/syntax/printer.go +++ b/src/cmd/compile/internal/syntax/printer.go @@ -481,10 +481,10 @@ func (p *printer) printRawNode(n Node) { if len(n.FieldList) > 0 { if p.linebreaks { p.print(newline, indent) - p.printFieldList(n.FieldList, n.TagList) + p.printFieldList(n.FieldList, n.TagList, _Semi) p.print(outdent, newline) } else { - p.printFieldList(n.FieldList, n.TagList) + p.printFieldList(n.FieldList, n.TagList, _Semi) } } p.print(_Rbrace) @@ -494,20 +494,40 @@ func (p *printer) printRawNode(n Node) { p.printSignature(n) case *InterfaceType: + // separate type list and method list + var types []Expr + var methods []*Field + for _, f := range n.MethodList { + if f.Name != nil && f.Name.Value == "type" { + types = append(types, f.Type) + } else { + // method or embedded interface + methods = append(methods, f) + } + } + + multiLine := len(n.MethodList) > 0 && p.linebreaks p.print(_Interface) - if len(n.MethodList) > 0 && p.linebreaks { + if multiLine { p.print(blank) } p.print(_Lbrace) - if len(n.MethodList) > 0 { - if p.linebreaks { - p.print(newline, indent) - p.printMethodList(n.MethodList) - p.print(outdent, newline) - } else { - p.printMethodList(n.MethodList) + if multiLine { + p.print(newline, indent) + } + if len(types) > 0 { + p.print(_Type, blank) + p.printExprList(types) + if len(methods) > 0 { + p.print(_Semi, blank) } } + if len(methods) > 0 { + p.printMethodList(methods) + } + if multiLine { + p.print(outdent, newline) + } p.print(_Rbrace) case *MapType: @@ -667,7 +687,13 @@ func (p *printer) printRawNode(n Node) { if n.Group == nil { p.print(_Type, blank) } - p.print(n.Name, blank) + p.print(n.Name) + if 
n.TParamList != nil { + p.print(_Lbrack) + p.printFieldList(n.TParamList, nil, _Comma) + p.print(_Rbrack) + } + p.print(blank) if n.Alias { p.print(_Assign, blank) } @@ -696,6 +722,11 @@ func (p *printer) printRawNode(n Node) { p.print(_Rparen, blank) } p.print(n.Name) + if n.TParamList != nil { + p.print(_Lbrack) + p.printFieldList(n.TParamList, nil, _Comma) + p.print(_Rbrack) + } p.printSignature(n.Type) if n.Body != nil { p.print(blank, n.Body) @@ -746,14 +777,14 @@ func (p *printer) printFields(fields []*Field, tags []*BasicLit, i, j int) { } } -func (p *printer) printFieldList(fields []*Field, tags []*BasicLit) { +func (p *printer) printFieldList(fields []*Field, tags []*BasicLit, sep token) { i0 := 0 var typ Expr for i, f := range fields { if f.Name == nil || f.Type != typ { if i0 < i { p.printFields(fields, tags, i0, i) - p.print(_Semi, newline) + p.print(sep, newline) i0 = i } typ = f.Type diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go index bcae815a468..ec4b1de573f 100644 --- a/src/cmd/compile/internal/syntax/printer_test.go +++ b/src/cmd/compile/internal/syntax/printer_test.go @@ -61,6 +61,21 @@ var stringTests = []string{ "package p", "package p; type _ int; type T1 = struct{}; type ( _ *struct{}; T2 = float32 )", + // generic type declarations + "package p; type _[T any] struct{}", + "package p; type _[A, B, C interface{m()}] struct{}", + "package p; type _[T any, A, B, C interface{m()}, X, Y, Z interface{type int}] struct{}", + + // generic function declarations + "package p; func _[T any]()", + "package p; func _[A, B, C interface{m()}]()", + "package p; func _[T any, A, B, C interface{m()}, X, Y, Z interface{type int}]()", + + // methods with generic receiver types + "package p; func (R[T]) _()", + "package p; func (*R[A, B, C]) _()", + "package p; func (_ *R[A, B, C]) _()", + // channels "package p; type _ chan chan int", "package p; type _ chan (<-chan int)", @@ -79,7 +94,7 @@ var stringTests = []string{ func TestPrintString(t *testing.T) { for _, want := range stringTests { - ast, err := Parse(nil, strings.NewReader(want), nil, nil, 0) + ast, err := Parse(nil, strings.NewReader(want), nil, nil, AllowGenerics) if err != nil { t.Error(err) continue @@ -116,6 +131,33 @@ var exprTests = [][2]string{ {"func(x int) complex128 { return 0 }", "func(x int) complex128 {…}"}, {"[]int{1, 2, 3}", "[]int{…}"}, + // type expressions + dup("[1 << 10]byte"), + dup("[]int"), + dup("*int"), + dup("struct{x int}"), + dup("func()"), + dup("func(int, float32) string"), + dup("interface{m()}"), + dup("interface{m() string; n(x int)}"), + dup("interface{type int}"), + dup("interface{type int, float64, string}"), + dup("interface{type int; m()}"), + dup("interface{type int, float64, string; m() string; n(x int)}"), + dup("map[string]int"), + dup("chan E"), + dup("<-chan E"), + dup("chan<- E"), + + // new interfaces + dup("interface{int}"), + dup("interface{~int}"), + dup("interface{~int}"), + dup("interface{int | string}"), + dup("interface{~int | ~string; float64; m()}"), + dup("interface{type a, b, c; ~int | ~string; float64; m()}"), + dup("interface{~T[int, string] | string}"), + // non-type expressions dup("(x)"), dup("x.f"), @@ -172,7 +214,7 @@ var exprTests = [][2]string{ func TestShortString(t *testing.T) { for _, test := range exprTests { src := "package p; var _ = " + test[0] - ast, err := Parse(nil, strings.NewReader(src), nil, nil, 0) + ast, err := Parse(nil, strings.NewReader(src), nil, nil, AllowGenerics) if err != nil { 
t.Errorf("%s: %s", test[0], err) continue diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go index 9fe49659844..218bc24e61f 100644 --- a/src/cmd/compile/internal/syntax/scanner.go +++ b/src/cmd/compile/internal/syntax/scanner.go @@ -343,6 +343,11 @@ redo: s.op, s.prec = Not, 0 s.tok = _Operator + case '~': + s.nextch() + s.op, s.prec = Tilde, 0 + s.tok = _Operator + default: s.errorf("invalid character %#U", s.ch) s.nextch() diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go index 04338629d47..2deb3bbf846 100644 --- a/src/cmd/compile/internal/syntax/scanner_test.go +++ b/src/cmd/compile/internal/syntax/scanner_test.go @@ -232,6 +232,9 @@ var sampleTokens = [...]struct { {_Literal, "`\r`", 0, 0}, // operators + {_Operator, "!", Not, 0}, + {_Operator, "~", Tilde, 0}, + {_Operator, "||", OrOr, precOrOr}, {_Operator, "&&", AndAnd, precAndAnd}, @@ -547,7 +550,7 @@ func TestNumbers(t *testing.T) { t.Errorf("%q: got error but bad not set", test.src) } - // compute lit where where s.lit is not defined + // compute lit where s.lit is not defined var lit string switch s.tok { case _Name, _Literal: @@ -601,7 +604,7 @@ func TestScanErrors(t *testing.T) { {"\U0001d7d8" /* 𝟘 */, "identifier cannot begin with digit U+1D7D8 '𝟘'", 0, 0}, {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid character U+00BD '½' in identifier", 0, 8 /* byte offset */}, - {"x + ~y", "invalid character U+007E '~'", 0, 4}, + {"x + #y", "invalid character U+0023 '#'", 0, 4}, {"foo$bar = 0", "invalid character U+0024 '$'", 0, 3}, {"0123456789", "invalid digit '8' in octal literal", 0, 8}, {"0123456789. /* foobar", "comment not terminated", 0, 12}, // valid float constant diff --git a/src/cmd/compile/internal/syntax/testdata/interface.go2 b/src/cmd/compile/internal/syntax/testdata/interface.go2 new file mode 100644 index 00000000000..a817327a43f --- /dev/null +++ b/src/cmd/compile/internal/syntax/testdata/interface.go2 @@ -0,0 +1,36 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains test cases for interfaces containing +// constraint elements. +// +// For now, we accept both ordinary type lists and the +// more complex constraint elements. + +package p + +type _ interface { + m() + type int + type int, string + E +} + +type _ interface { + m() + ~int + int | string + int | ~string + ~int | ~string +} + + +type _ interface { + m() + ~int + T[int, string] | string + int | ~T[string, struct{}] + ~int | ~string + type bool, int, float64 +} diff --git a/src/cmd/compile/internal/syntax/testdata/issue43674.src b/src/cmd/compile/internal/syntax/testdata/issue43674.src new file mode 100644 index 00000000000..51c692ae69f --- /dev/null +++ b/src/cmd/compile/internal/syntax/testdata/issue43674.src @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func _(... /* ERROR [.][.][.] is missing type */ ) +func _(... /* ERROR [.][.][.] is missing type */ , int) + +func _(a, b ... /* ERROR [.][.][.] is missing type */ ) +func _(a, b ... /* ERROR [.][.][.] is missing type */ , x int) + +func _()(... /* ERROR [.][.][.] 
 is missing type */ ) diff --git a/src/cmd/compile/internal/syntax/testing.go b/src/cmd/compile/internal/syntax/testing.go index 3e02dc1c5db..6a97dc0c2a6 100644 --- a/src/cmd/compile/internal/syntax/testing.go +++ b/src/cmd/compile/internal/syntax/testing.go @@ -33,10 +33,10 @@ var errRx = regexp.MustCompile(`^ *ERROR *"?([^"]*)"?`) // for each Error is the position of the token immediately preceding // the comment, the Error message is the message msg extracted from // the comment, with all errors that are on the same line collected -// in a slice. If there is no preceding token (the `ERROR` comment -// appears in the beginning of the file), then the recorded position -// is unknown (line, col = 0, 0). If there are no ERROR comments, the -// result is nil. +// in a slice, in source order. If there is no preceding token (the +// `ERROR` comment appears in the beginning of the file), then the +// recorded position is unknown (line, col = 0, 0). If there are no +// ERROR comments, the result is nil. func ErrorMap(src io.Reader) (errmap map[uint][]Error) { // position of previous token var base *PosBase diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go index 2936b6576bc..60eae36ec92 100644 --- a/src/cmd/compile/internal/syntax/tokens.go +++ b/src/cmd/compile/internal/syntax/tokens.go @@ -111,9 +111,10 @@ const ( _ Operator = iota // Def is the : in := - Def // : - Not // ! - Recv // <- + Def // : + Not // ! + Recv // <- + Tilde // ~ // precOrOr OrOr // || diff --git a/src/cmd/compile/internal/test/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go index a0a11671e1e..b752c486126 100644 --- a/src/cmd/compile/internal/test/abiutils_test.go +++ b/src/cmd/compile/internal/test/abiutils_test.go @@ -14,6 +14,7 @@ import ( "cmd/internal/obj" "cmd/internal/obj/x86" "cmd/internal/src" + "fmt" "os" "testing" ) @@ -21,7 +22,7 @@ import ( // AMD64 registers available: // - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11 // - floating point: X0 - X14 -var configAMD64 = abi.NewABIConfig(9, 15) +var configAMD64 = abi.NewABIConfig(9, 15, 0) func TestMain(m *testing.M) { ssagen.Arch.LinkArch = &x86.Linkamd64 @@ -55,7 +56,14 @@ func TestABIUtilsBasic1(t *testing.T) { } func TestABIUtilsBasic2(t *testing.T) { - // func(x int32, y float64) (int32, float64, float64) + // func(p1 int8, p2 int16, p3 int32, p4 int64, + // p5 float32, p6 float32, p7 float64, p8 float64, + // p9 int8, p10 int16, p11 int32, p12 int64, + // p13 float32, p14 float32, p15 float64, p16 float64, + // p17 complex128, p18 complex128, p19 complex128, p20 complex128, + // p21 complex64, p22 int8, p23 int16, p24 int32, p25 int64, + // p26 int8, p27 int16, p28 int32, p29 int64) + // (r1 int32, r2 float64, r3 float64) { i8 := types.Types[types.TINT8] i16 := types.Types[types.TINT16] i32 := types.Types[types.TINT32] @@ -114,6 +122,8 @@ func TestABIUtilsBasic2(t *testing.T) { } func TestABIUtilsArrays(t *testing.T) { + // func(p1 [1]int32, p2 [0]int32, p3 [1][1]int32, p4 [2]int32) + // (r1 [2]int32, r2 [1]int32, r3 [0]int32, r4 [1][1]int32) { i32 := types.Types[types.TINT32] ae := types.NewArray(i32, 0) a1 := types.NewArray(i32, 1) @@ -138,6 +148,9 @@ func TestABIUtilsArrays(t *testing.T) { } func TestABIUtilsStruct1(t *testing.T) { + // type s struct { f1 int8; f2 int8; f3 struct {}; f4 int8; f5 int16 } + // func(p1 int8, p2 s, p3 int64) + // (r1 s, r2 int8, r3 int32) { i8 := types.Types[types.TINT8] i16 := types.Types[types.TINT16] i32 := types.Types[types.TINT32] @@ -160,6 +173,10 @@ 
func TestABIUtilsStruct1(t *testing.T) { } func TestABIUtilsStruct2(t *testing.T) { + // type s struct { f1 int64; f2 struct { } } + // type fs struct { f1 float64; f2 s; f3 struct { } } + // func(p1 s, p2 s, p3 fs) + // (r1 fs, r2 fs) f64 := types.Types[types.TFLOAT64] i64 := types.Types[types.TINT64] s := mkstruct([]*types.Type{i64, mkstruct([]*types.Type{})}) @@ -170,16 +187,62 @@ func TestABIUtilsStruct2(t *testing.T) { exp := makeExpectedDump(` IN 0: R{ I0 } spilloffset: 0 typ: struct { int64; struct {} } IN 1: R{ I1 } spilloffset: 16 typ: struct { int64; struct {} } - IN 2: R{ I2 F0 } spilloffset: 32 typ: struct { float64; struct { int64; struct {} }; struct {} } - OUT 0: R{ I0 F0 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } - OUT 1: R{ I1 F1 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + IN 2: R{ F0 I2 } spilloffset: 32 typ: struct { float64; struct { int64; struct {} }; struct {} } + OUT 0: R{ F0 I0 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + OUT 1: R{ F1 I1 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } offsetToSpillArea: 0 spillAreaSize: 64 `) abitest(t, ft, exp) } +// TestABIUtilsEmptyFieldAtEndOfStruct is testing to make sure +// the abi code is doing the right thing for struct types that have +// a trailing zero-sized field (where we need to add padding). +func TestABIUtilsEmptyFieldAtEndOfStruct(t *testing.T) { + // type s struct { f1 [2]int64; f2 struct { } } + // type s2 struct { f1 [3]int16; f2 struct { } } + // type fs struct { f1 float64; f2 s; f3 struct { } } + // func(p1 s, p2 [2]bool, p3 s2, p4 fs, p5 fs) (r1 fs, r2 [2]bool, r3 fs) + f64 := types.Types[types.TFLOAT64] + i64 := types.Types[types.TINT64] + i16 := types.Types[types.TINT16] + tb := types.Types[types.TBOOL] + ab2 := types.NewArray(tb, 2) + a2 := types.NewArray(i64, 2) + a3 := types.NewArray(i16, 3) + s := mkstruct([]*types.Type{a2, mkstruct([]*types.Type{})}) + s2 := mkstruct([]*types.Type{a3, mkstruct([]*types.Type{})}) + fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})}) + ft := mkFuncType(nil, []*types.Type{s, ab2, s2, fs, fs}, + []*types.Type{fs, ab2, fs}) + + exp := makeExpectedDump(` + IN 0: R{ } offset: 0 typ: struct { [2]int64; struct {} } + IN 1: R{ } offset: 24 typ: [2]bool + IN 2: R{ } offset: 26 typ: struct { [3]int16; struct {} } + IN 3: R{ } offset: 40 typ: struct { float64; struct { [2]int64; struct {} }; struct {} } + IN 4: R{ } offset: 80 typ: struct { float64; struct { [2]int64; struct {} }; struct {} } + OUT 0: R{ } offset: 120 typ: struct { float64; struct { [2]int64; struct {} }; struct {} } + OUT 1: R{ } offset: 160 typ: [2]bool + OUT 2: R{ } offset: 168 typ: struct { float64; struct { [2]int64; struct {} }; struct {} } + offsetToSpillArea: 208 spillAreaSize: 0 +`) + + abitest(t, ft, exp) + + // Check to make sure that NumParamRegs yields 2 and not 3 + // for struct "s" (e.g. that it handles the padding properly).
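// (A by-hand check of the expectation below, assuming the AMD64 config
// above: struct { [2]int64; struct {} } passes its two 8-byte array
// elements in two integer registers; the trailing zero-sized field only
// pads the type's size out to 24 bytes, as the dump above shows, and
// must not be counted as a third register.)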
+ nps := configAMD64.NumParamRegs(s) + if nps != 2 { + t.Errorf("NumParamRegs(%v) returned %d expected %d\n", + s, nps, 2) + } +} + func TestABIUtilsSliceString(t *testing.T) { + // func(p1 []int32, p2 int8, p3 []int32, p4 int8, p5 string, + // p6 int64, p7 []int32) (r1 string, r2 int64, r3 string, r4 []int32) i32 := types.Types[types.TINT32] sli32 := types.NewSlice(i32) str := types.New(types.TSTRING) @@ -208,10 +271,12 @@ func TestABIUtilsSliceString(t *testing.T) { } func TestABIUtilsMethod(t *testing.T) { + // type s1 struct { f1 int16; f2 int16; f3 int16 } + // func(p1 *s1, p2 [7]*s1, p3 float64, p4 int16, p5 int16, p6 int16) + // (r1 [7]*s1, r2 float64, r3 int64) i16 := types.Types[types.TINT16] i64 := types.Types[types.TINT64] f64 := types.Types[types.TFLOAT64] - s1 := mkstruct([]*types.Type{i16, i16, i16}) ps1 := types.NewPtr(s1) a7 := types.NewArray(ps1, 7) @@ -236,18 +301,20 @@ func TestABIUtilsMethod(t *testing.T) { } func TestABIUtilsInterfaces(t *testing.T) { + // type s1 struct { f1 int16; f2 int16; f3 bool } + // type nei interface { ...() string } + // func(p1 s1, p2 interface{}, p3 interface{}, p4 nei, + // p5 *interface{}, p6 nei, p7 int16) + // (r1 interface{}, r2 nei, r3 *interface{}) ei := types.Types[types.TINTER] // interface{} pei := types.NewPtr(ei) // *interface{} fldt := mkFuncType(types.FakeRecvType(), []*types.Type{}, []*types.Type{types.UntypedString}) field := types.NewField(src.NoXPos, nil, fldt) - // interface{ ...() string } nei := types.NewInterface(types.LocalPkg, []*types.Field{field}) - i16 := types.Types[types.TINT16] tb := types.Types[types.TBOOL] s1 := mkstruct([]*types.Type{i16, i16, tb}) - ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16}, []*types.Type{ei, nei, pei}) @@ -293,3 +360,38 @@ func TestABINumParamRegs(t *testing.T) { nrtest(t, a, 12) } + +func TestABIUtilsComputePadding(t *testing.T) { + // type s1 struct { f1 int8; f2 int16; f3 struct{}; f4 int32; f5 int64 } + i8 := types.Types[types.TINT8] + i16 := types.Types[types.TINT16] + i32 := types.Types[types.TINT32] + i64 := types.Types[types.TINT64] + emptys := mkstruct([]*types.Type{}) + s1 := mkstruct([]*types.Type{i8, i16, emptys, i32, i64}) + // func(p1 int32, p2 s1, p3 emptys, p4 [1]int32) + a1 := types.NewArray(i32, 1) + ft := mkFuncType(nil, []*types.Type{i32, s1, emptys, a1}, []*types.Type{}) + + // Run abitest() just to document what we're expected to see. + exp := makeExpectedDump(` + IN 0: R{ I0 } spilloffset: 0 typ: int32 + IN 1: R{ I1 I2 I3 I4 } spilloffset: 8 typ: struct { int8; int16; struct {}; int32; int64 } + IN 2: R{ } offset: 0 typ: struct {} + IN 3: R{ I5 } spilloffset: 24 typ: [1]int32 + offsetToSpillArea: 0 spillAreaSize: 32 +`) + abitest(t, ft, exp) + + // Analyze with full set of registers, then call ComputePadding + // on the second param, verifying the results. + regRes := configAMD64.ABIAnalyze(ft, false) + padding := make([]uint64, 32) + parm := regRes.InParams()[1] + padding = parm.ComputePadding(padding) + want := "[1 1 1 0]" + got := fmt.Sprintf("%+v", padding) + if got != want { + t.Errorf("padding mismatch: wanted %q got %q\n", want, got) + } +} diff --git a/src/cmd/compile/internal/test/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go index bac0c7639d8..b9456331332 100644 --- a/src/cmd/compile/internal/test/abiutilsaux_test.go +++ b/src/cmd/compile/internal/test/abiutilsaux_test.go @@ -119,7 +119,7 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) { types.CalcSize(ft) // Analyze with full set of registers.
- regRes := configAMD64.ABIAnalyze(ft) + regRes := configAMD64.ABIAnalyze(ft, false) regResString := strings.TrimSpace(regRes.String()) // Check results. @@ -129,36 +129,4 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) { strings.TrimSpace(exp.dump), regResString, reason) } - // Analyze again with empty register set. - empty := abi.NewABIConfig(0, 0) - emptyRes := empty.ABIAnalyze(ft) - emptyResString := emptyRes.String() - - // Walk the results and make sure the offsets assigned match - // up with those assiged by CalcSize. This checks to make sure that - // when we have no available registers the ABI assignment degenerates - // back to the original ABI0. - - // receiver - failed := 0 - rfsl := ft.Recvs().Fields().Slice() - poff := 0 - if len(rfsl) != 0 { - failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.InParams()[0], "receiver", 0) - poff = 1 - } - // params - pfsl := ft.Params().Fields().Slice() - for k, f := range pfsl { - verifyParamResultOffset(t, f, emptyRes.InParams()[k+poff], "param", k) - } - // results - ofsl := ft.Results().Fields().Slice() - for k, f := range ofsl { - failed |= verifyParamResultOffset(t, f, emptyRes.OutParams()[k], "result", k) - } - - if failed != 0 { - t.Logf("emptyres:\n%s\n", emptyResString) - } } diff --git a/src/cmd/compile/internal/test/bench_test.go b/src/cmd/compile/internal/test/bench_test.go index 3fffe57d082..47246000917 100644 --- a/src/cmd/compile/internal/test/bench_test.go +++ b/src/cmd/compile/internal/test/bench_test.go @@ -62,3 +62,63 @@ func BenchmarkConstModify(b *testing.B) { } } } + +func BenchmarkBitSet(b *testing.B) { + const N = 64 * 8 + a := make([]uint64, N/64) + for i := 0; i < b.N; i++ { + for j := uint64(0); j < N; j++ { + a[j/64] |= 1 << (j % 64) + } + } +} + +func BenchmarkBitClear(b *testing.B) { + const N = 64 * 8 + a := make([]uint64, N/64) + for i := 0; i < b.N; i++ { + for j := uint64(0); j < N; j++ { + a[j/64] &^= 1 << (j % 64) + } + } +} + +func BenchmarkBitToggle(b *testing.B) { + const N = 64 * 8 + a := make([]uint64, N/64) + for i := 0; i < b.N; i++ { + for j := uint64(0); j < N; j++ { + a[j/64] ^= 1 << (j % 64) + } + } +} + +func BenchmarkBitSetConst(b *testing.B) { + const N = 64 + a := make([]uint64, N) + for i := 0; i < b.N; i++ { + for j := range a { + a[j] |= 1 << 37 + } + } +} + +func BenchmarkBitClearConst(b *testing.B) { + const N = 64 + a := make([]uint64, N) + for i := 0; i < b.N; i++ { + for j := range a { + a[j] &^= 1 << 37 + } + } +} + +func BenchmarkBitToggleConst(b *testing.B) { + const N = 64 + a := make([]uint64, N) + for i := 0; i < b.N; i++ { + for j := range a { + a[j] ^= 1 << 37 + } + } +} diff --git a/src/cmd/compile/internal/test/clobberdead_test.go b/src/cmd/compile/internal/test/clobberdead_test.go new file mode 100644 index 00000000000..88b7d34623a --- /dev/null +++ b/src/cmd/compile/internal/test/clobberdead_test.go @@ -0,0 +1,55 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "internal/testenv" + "io/ioutil" + "os/exec" + "path/filepath" + "testing" +) + +const helloSrc = ` +package main +import "fmt" +func main() { fmt.Println("hello") } +` + +func TestClobberDead(t *testing.T) { + // Test that clobberdead mode generates correct program. + runHello(t, "-clobberdead") +} + +func TestClobberDeadReg(t *testing.T) { + // Test that clobberdeadreg mode generates correct program. 
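// (Context, stated as an assumption about these debug flags: -clobberdead
// overwrites stack slots that the compiler believes are dead with a
// sentinel value, and -clobberdeadreg does the same for dead registers,
// so a successful "hello" run is evidence that the liveness information
// is sound.)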
+ runHello(t, "-clobberdeadreg") +} + +func runHello(t *testing.T, flag string) { + if testing.Short() { + // This test rebuilds the runtime with a special flag, which + // takes a while. + t.Skip("skip in short mode") + } + testenv.MustHaveGoRun(t) + t.Parallel() + + tmpdir := t.TempDir() + src := filepath.Join(tmpdir, "x.go") + err := ioutil.WriteFile(src, []byte(helloSrc), 0644) + if err != nil { + t.Fatalf("write file failed: %v", err) + } + + cmd := exec.Command(testenv.GoToolPath(t), "run", "-gcflags=all="+flag, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("go run failed: %v\n%s", err, out) + } + if string(out) != "hello\n" { + t.Errorf("wrong output: got %q, want %q", out, "hello\n") + } +} diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go index 9d31975b310..6f100033cf5 100644 --- a/src/cmd/compile/internal/test/inl_test.go +++ b/src/cmd/compile/internal/test/inl_test.go @@ -16,7 +16,7 @@ import ( "testing" ) -// TestIntendedInlining tests that specific runtime functions are inlined. +// TestIntendedInlining tests that specific functions are inlined. // This allows refactoring for code clarity and re-use without fear that // changes to the compiler will cause silent performance regressions. func TestIntendedInlining(t *testing.T) { @@ -155,6 +155,9 @@ func TestIntendedInlining(t *testing.T) { "(*rngSource).Int63", "(*rngSource).Uint64", }, + "net": { + "(*UDPConn).ReadFromUDP", + }, } if runtime.GOARCH != "386" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" { @@ -172,8 +175,8 @@ func TestIntendedInlining(t *testing.T) { want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32") } if bits.UintSize == 64 { - // rotl_31 is only defined on 64-bit architectures - want["runtime"] = append(want["runtime"], "rotl_31") + // mix is only defined on 64-bit architectures + want["runtime"] = append(want["runtime"], "mix") } switch runtime.GOARCH { diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go index 158fedc28ef..7d54a9181d1 100644 --- a/src/cmd/compile/internal/test/testdata/arith_test.go +++ b/src/cmd/compile/internal/test/testdata/arith_test.go @@ -1452,3 +1452,46 @@ func testDivisibility(t *testing.T) { } } } + +//go:noinline +func genREV16_1(c uint64) uint64 { + b := ((c & 0xff00ff00ff00ff00) >> 8) | ((c & 0x00ff00ff00ff00ff) << 8) + return b +} + +//go:noinline +func genREV16_2(c uint64) uint64 { + b := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8) + return b +} + +//go:noinline +func genREV16W(c uint32) uint32 { + b := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8) + return b +} + +func TestREV16(t *testing.T) { + x := uint64(0x8f7f6f5f4f3f2f1f) + want1 := uint64(0x7f8f5f6f3f4f1f2f) + want2 := uint64(0x3f4f1f2f) + + got1 := genREV16_1(x) + if got1 != want1 { + t.Errorf("genREV16_1(%#x) = %#x want %#x", x, got1, want1) + } + got2 := genREV16_2(x) + if got2 != want2 { + t.Errorf("genREV16_2(%#x) = %#x want %#x", x, got2, want2) + } +} + +func TestREV16W(t *testing.T) { + x := uint32(0x4f3f2f1f) + want := uint32(0x3f4f1f2f) + + got := genREV16W(x) + if got != want { + t.Errorf("genREV16W(%#x) = %#x want %#x", x, got, want) + } +} diff --git a/src/cmd/compile/internal/test/zerorange_test.go b/src/cmd/compile/internal/test/zerorange_test.go index cb1a6e04e4e..ec871361572 100644 --- a/src/cmd/compile/internal/test/zerorange_test.go +++ 
b/src/cmd/compile/internal/test/zerorange_test.go @@ -4,7 +4,9 @@ package test -import "testing" +import ( + "testing" +) var glob = 3 var globp *int64 @@ -94,3 +96,90 @@ func testZeroRange136(t *testing.T) (r, s, t2, u, v, w, x, y, r1, s1, t1, u1, v1 globp = &z1 return } + +type S struct { + x [2]uint64 + p *uint64 + y [2]uint64 + q uint64 +} + +type M struct { + x [8]uint64 + p *uint64 + y [8]uint64 + q uint64 +} + +type L struct { + x [4096]uint64 + p *uint64 + y [4096]uint64 + q uint64 +} + +//go:noinline +func triggerZerorangeLarge(f, g, h uint64) (rv0 uint64) { + ll := L{p: &f} + da := f + rv0 = f + g + h + defer func(dl L, i uint64) { + rv0 += dl.q + i + }(ll, da) + return rv0 +} + +//go:noinline +func triggerZerorangeMedium(f, g, h uint64) (rv0 uint64) { + ll := M{p: &f} + rv0 = f + g + h + defer func(dm M, i uint64) { + rv0 += dm.q + i + }(ll, f) + return rv0 +} + +//go:noinline +func triggerZerorangeSmall(f, g, h uint64) (rv0 uint64) { + ll := S{p: &f} + rv0 = f + g + h + defer func(ds S, i uint64) { + rv0 += ds.q + i + }(ll, f) + return rv0 +} + +// This test was created as a follow-up to issue #45372, to help +// improve coverage of the compiler's arch-specific "zerorange" +// function, which is invoked to zero out ambiguously live portions of +// the stack frame in certain specific circumstances. +// +// In the current compiler implementation, for zerorange to be +// invoked, we need to have an ambiguously live variable that needs +// zeroing. One way to trigger this is to have a function with an +// open-coded defer, where the open-coded deferred function has an +// argument that contains a pointer (this is what's used below). +// +// At the moment this test doesn't do any specific checking for +// code sequence, or verification that things were properly set to +// zero; this seems as though it would be too tricky and would result +// in a "brittle" test. +// +// The small/medium/large scenarios below are inspired by the amd64 +// implementation of zerorange, which generates different code +// depending on the size of the thing that needs to be zeroed out +// (I've verified at the time of writing this test that it +// exercises the various cases).
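// (Restating the triggering shape from the functions above: the
// pointer-bearing value ll is copied into the open-coded defer's
// argument area, e.g.
//
//	ll := L{p: &f}
//	defer func(dl L, i uint64) { rv0 += dl.q + i }(ll, da)
//
// and that argument area is ambiguously live, which is what forces the
// prologue's zerorange call.)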
+// +func TestZerorange45372(t *testing.T) { + if r := triggerZerorangeLarge(101, 303, 505); r != 1010 { + t.Errorf("large: wanted %d got %d", 1010, r) + } + if r := triggerZerorangeMedium(101, 303, 505); r != 1010 { + t.Errorf("medium: wanted %d got %d", 1010, r) + } + if r := triggerZerorangeSmall(101, 303, 505); r != 1010 { + t.Errorf("small: wanted %d got %d", 1010, r) + } + +} diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go index f9a4f6aef4d..67a894c7edd 100644 --- a/src/cmd/compile/internal/typecheck/builtin.go +++ b/src/cmd/compile/internal/typecheck/builtin.go @@ -39,6 +39,7 @@ var runtimeDecls = [...]struct { {"goPanicSlice3BU", funcTag, 18}, {"goPanicSlice3C", funcTag, 16}, {"goPanicSlice3CU", funcTag, 18}, + {"goPanicSliceConvert", funcTag, 16}, {"printbool", funcTag, 19}, {"printfloat", funcTag, 21}, {"printint", funcTag, 23}, @@ -71,133 +72,135 @@ var runtimeDecls = [...]struct { {"decoderune", funcTag, 55}, {"countrunes", funcTag, 56}, {"convI2I", funcTag, 57}, - {"convT16", funcTag, 58}, - {"convT32", funcTag, 58}, - {"convT64", funcTag, 58}, - {"convTstring", funcTag, 58}, - {"convTslice", funcTag, 58}, - {"convT2E", funcTag, 59}, - {"convT2Enoptr", funcTag, 59}, - {"convT2I", funcTag, 59}, - {"convT2Inoptr", funcTag, 59}, - {"assertE2I", funcTag, 57}, - {"assertE2I2", funcTag, 60}, - {"assertI2I", funcTag, 57}, - {"assertI2I2", funcTag, 60}, - {"panicdottypeE", funcTag, 61}, - {"panicdottypeI", funcTag, 61}, - {"panicnildottype", funcTag, 62}, - {"ifaceeq", funcTag, 64}, - {"efaceeq", funcTag, 64}, - {"fastrand", funcTag, 66}, - {"makemap64", funcTag, 68}, - {"makemap", funcTag, 69}, - {"makemap_small", funcTag, 70}, - {"mapaccess1", funcTag, 71}, - {"mapaccess1_fast32", funcTag, 72}, - {"mapaccess1_fast64", funcTag, 72}, - {"mapaccess1_faststr", funcTag, 72}, - {"mapaccess1_fat", funcTag, 73}, - {"mapaccess2", funcTag, 74}, - {"mapaccess2_fast32", funcTag, 75}, - {"mapaccess2_fast64", funcTag, 75}, - {"mapaccess2_faststr", funcTag, 75}, - {"mapaccess2_fat", funcTag, 76}, - {"mapassign", funcTag, 71}, - {"mapassign_fast32", funcTag, 72}, - {"mapassign_fast32ptr", funcTag, 72}, - {"mapassign_fast64", funcTag, 72}, - {"mapassign_fast64ptr", funcTag, 72}, - {"mapassign_faststr", funcTag, 72}, - {"mapiterinit", funcTag, 77}, - {"mapdelete", funcTag, 77}, - {"mapdelete_fast32", funcTag, 78}, - {"mapdelete_fast64", funcTag, 78}, - {"mapdelete_faststr", funcTag, 78}, - {"mapiternext", funcTag, 79}, - {"mapclear", funcTag, 80}, - {"makechan64", funcTag, 82}, - {"makechan", funcTag, 83}, - {"chanrecv1", funcTag, 85}, - {"chanrecv2", funcTag, 86}, - {"chansend1", funcTag, 88}, + {"convT16", funcTag, 59}, + {"convT32", funcTag, 61}, + {"convT64", funcTag, 62}, + {"convTstring", funcTag, 63}, + {"convTslice", funcTag, 66}, + {"convT2E", funcTag, 67}, + {"convT2Enoptr", funcTag, 67}, + {"convT2I", funcTag, 67}, + {"convT2Inoptr", funcTag, 67}, + {"assertE2I", funcTag, 68}, + {"assertE2I2", funcTag, 57}, + {"assertI2I", funcTag, 68}, + {"assertI2I2", funcTag, 57}, + {"panicdottypeE", funcTag, 69}, + {"panicdottypeI", funcTag, 69}, + {"panicnildottype", funcTag, 70}, + {"ifaceeq", funcTag, 72}, + {"efaceeq", funcTag, 72}, + {"fastrand", funcTag, 73}, + {"makemap64", funcTag, 75}, + {"makemap", funcTag, 76}, + {"makemap_small", funcTag, 77}, + {"mapaccess1", funcTag, 78}, + {"mapaccess1_fast32", funcTag, 79}, + {"mapaccess1_fast64", funcTag, 80}, + {"mapaccess1_faststr", funcTag, 81}, + {"mapaccess1_fat", funcTag, 82}, + 
{"mapaccess2", funcTag, 83}, + {"mapaccess2_fast32", funcTag, 84}, + {"mapaccess2_fast64", funcTag, 85}, + {"mapaccess2_faststr", funcTag, 86}, + {"mapaccess2_fat", funcTag, 87}, + {"mapassign", funcTag, 78}, + {"mapassign_fast32", funcTag, 79}, + {"mapassign_fast32ptr", funcTag, 88}, + {"mapassign_fast64", funcTag, 80}, + {"mapassign_fast64ptr", funcTag, 88}, + {"mapassign_faststr", funcTag, 81}, + {"mapiterinit", funcTag, 89}, + {"mapdelete", funcTag, 89}, + {"mapdelete_fast32", funcTag, 90}, + {"mapdelete_fast64", funcTag, 91}, + {"mapdelete_faststr", funcTag, 92}, + {"mapiternext", funcTag, 93}, + {"mapclear", funcTag, 94}, + {"makechan64", funcTag, 96}, + {"makechan", funcTag, 97}, + {"chanrecv1", funcTag, 99}, + {"chanrecv2", funcTag, 100}, + {"chansend1", funcTag, 102}, {"closechan", funcTag, 30}, - {"writeBarrier", varTag, 90}, - {"typedmemmove", funcTag, 91}, - {"typedmemclr", funcTag, 92}, - {"typedslicecopy", funcTag, 93}, - {"selectnbsend", funcTag, 94}, - {"selectnbrecv", funcTag, 95}, - {"selectnbrecv2", funcTag, 97}, - {"selectsetpc", funcTag, 98}, - {"selectgo", funcTag, 99}, + {"writeBarrier", varTag, 104}, + {"typedmemmove", funcTag, 105}, + {"typedmemclr", funcTag, 106}, + {"typedslicecopy", funcTag, 107}, + {"selectnbsend", funcTag, 108}, + {"selectnbrecv", funcTag, 109}, + {"selectsetpc", funcTag, 110}, + {"selectgo", funcTag, 111}, {"block", funcTag, 9}, - {"makeslice", funcTag, 100}, - {"makeslice64", funcTag, 101}, - {"makeslicecopy", funcTag, 102}, - {"growslice", funcTag, 104}, - {"memmove", funcTag, 105}, - {"memclrNoHeapPointers", funcTag, 106}, - {"memclrHasPointers", funcTag, 106}, - {"memequal", funcTag, 107}, - {"memequal0", funcTag, 108}, - {"memequal8", funcTag, 108}, - {"memequal16", funcTag, 108}, - {"memequal32", funcTag, 108}, - {"memequal64", funcTag, 108}, - {"memequal128", funcTag, 108}, - {"f32equal", funcTag, 109}, - {"f64equal", funcTag, 109}, - {"c64equal", funcTag, 109}, - {"c128equal", funcTag, 109}, - {"strequal", funcTag, 109}, - {"interequal", funcTag, 109}, - {"nilinterequal", funcTag, 109}, - {"memhash", funcTag, 110}, - {"memhash0", funcTag, 111}, - {"memhash8", funcTag, 111}, - {"memhash16", funcTag, 111}, - {"memhash32", funcTag, 111}, - {"memhash64", funcTag, 111}, - {"memhash128", funcTag, 111}, - {"f32hash", funcTag, 111}, - {"f64hash", funcTag, 111}, - {"c64hash", funcTag, 111}, - {"c128hash", funcTag, 111}, - {"strhash", funcTag, 111}, - {"interhash", funcTag, 111}, - {"nilinterhash", funcTag, 111}, - {"int64div", funcTag, 112}, - {"uint64div", funcTag, 113}, - {"int64mod", funcTag, 112}, - {"uint64mod", funcTag, 113}, - {"float64toint64", funcTag, 114}, - {"float64touint64", funcTag, 115}, - {"float64touint32", funcTag, 116}, - {"int64tofloat64", funcTag, 117}, - {"uint64tofloat64", funcTag, 118}, - {"uint32tofloat64", funcTag, 119}, - {"complex128div", funcTag, 120}, + {"makeslice", funcTag, 112}, + {"makeslice64", funcTag, 113}, + {"makeslicecopy", funcTag, 114}, + {"growslice", funcTag, 116}, + {"unsafeslice", funcTag, 117}, + {"unsafeslice64", funcTag, 118}, + {"memmove", funcTag, 119}, + {"memclrNoHeapPointers", funcTag, 120}, + {"memclrHasPointers", funcTag, 120}, + {"memequal", funcTag, 121}, + {"memequal0", funcTag, 122}, + {"memequal8", funcTag, 122}, + {"memequal16", funcTag, 122}, + {"memequal32", funcTag, 122}, + {"memequal64", funcTag, 122}, + {"memequal128", funcTag, 122}, + {"f32equal", funcTag, 123}, + {"f64equal", funcTag, 123}, + {"c64equal", funcTag, 123}, + {"c128equal", funcTag, 123}, + {"strequal", funcTag, 
123}, + {"interequal", funcTag, 123}, + {"nilinterequal", funcTag, 123}, + {"memhash", funcTag, 124}, + {"memhash0", funcTag, 125}, + {"memhash8", funcTag, 125}, + {"memhash16", funcTag, 125}, + {"memhash32", funcTag, 125}, + {"memhash64", funcTag, 125}, + {"memhash128", funcTag, 125}, + {"f32hash", funcTag, 125}, + {"f64hash", funcTag, 125}, + {"c64hash", funcTag, 125}, + {"c128hash", funcTag, 125}, + {"strhash", funcTag, 125}, + {"interhash", funcTag, 125}, + {"nilinterhash", funcTag, 125}, + {"int64div", funcTag, 126}, + {"uint64div", funcTag, 127}, + {"int64mod", funcTag, 126}, + {"uint64mod", funcTag, 127}, + {"float64toint64", funcTag, 128}, + {"float64touint64", funcTag, 129}, + {"float64touint32", funcTag, 130}, + {"int64tofloat64", funcTag, 131}, + {"uint64tofloat64", funcTag, 132}, + {"uint32tofloat64", funcTag, 133}, + {"complex128div", funcTag, 134}, + {"getcallerpc", funcTag, 135}, + {"getcallersp", funcTag, 135}, {"racefuncenter", funcTag, 31}, - {"racefuncenterfp", funcTag, 9}, {"racefuncexit", funcTag, 9}, {"raceread", funcTag, 31}, {"racewrite", funcTag, 31}, - {"racereadrange", funcTag, 121}, - {"racewriterange", funcTag, 121}, - {"msanread", funcTag, 121}, - {"msanwrite", funcTag, 121}, - {"msanmove", funcTag, 122}, - {"checkptrAlignment", funcTag, 123}, - {"checkptrArithmetic", funcTag, 125}, - {"libfuzzerTraceCmp1", funcTag, 127}, - {"libfuzzerTraceCmp2", funcTag, 129}, - {"libfuzzerTraceCmp4", funcTag, 130}, - {"libfuzzerTraceCmp8", funcTag, 131}, - {"libfuzzerTraceConstCmp1", funcTag, 127}, - {"libfuzzerTraceConstCmp2", funcTag, 129}, - {"libfuzzerTraceConstCmp4", funcTag, 130}, - {"libfuzzerTraceConstCmp8", funcTag, 131}, + {"racereadrange", funcTag, 136}, + {"racewriterange", funcTag, 136}, + {"msanread", funcTag, 136}, + {"msanwrite", funcTag, 136}, + {"msanmove", funcTag, 137}, + {"checkptrAlignment", funcTag, 138}, + {"checkptrArithmetic", funcTag, 140}, + {"libfuzzerTraceCmp1", funcTag, 141}, + {"libfuzzerTraceCmp2", funcTag, 142}, + {"libfuzzerTraceCmp4", funcTag, 143}, + {"libfuzzerTraceCmp8", funcTag, 144}, + {"libfuzzerTraceConstCmp1", funcTag, 141}, + {"libfuzzerTraceConstCmp2", funcTag, 142}, + {"libfuzzerTraceConstCmp4", funcTag, 143}, + {"libfuzzerTraceConstCmp8", funcTag, 144}, {"x86HasPOPCNT", varTag, 6}, {"x86HasSSE41", varTag, 6}, {"x86HasFMA", varTag, 6}, @@ -220,7 +223,7 @@ func params(tlist ...*types.Type) []*types.Field { } func runtimeTypes() []*types.Type { - var typs [132]*types.Type + var typs [145]*types.Type typs[0] = types.ByteType typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[types.TANY] @@ -279,79 +282,92 @@ func runtimeTypes() []*types.Type { typs[55] = newSig(params(typs[28], typs[15]), params(typs[46], typs[15])) typs[56] = newSig(params(typs[28]), params(typs[15])) typs[57] = newSig(params(typs[1], typs[2]), params(typs[2])) - typs[58] = newSig(params(typs[2]), params(typs[7])) - typs[59] = newSig(params(typs[1], typs[3]), params(typs[2])) - typs[60] = newSig(params(typs[1], typs[2]), params(typs[2], typs[6])) - typs[61] = newSig(params(typs[1], typs[1], typs[1]), nil) - typs[62] = newSig(params(typs[1]), nil) - typs[63] = types.NewPtr(typs[5]) - typs[64] = newSig(params(typs[63], typs[7], typs[7]), params(typs[6])) - typs[65] = types.Types[types.TUINT32] - typs[66] = newSig(nil, params(typs[65])) - typs[67] = types.NewMap(typs[2], typs[2]) - typs[68] = newSig(params(typs[1], typs[22], typs[3]), params(typs[67])) - typs[69] = newSig(params(typs[1], typs[15], typs[3]), params(typs[67])) - typs[70] = newSig(nil, 
params(typs[67])) - typs[71] = newSig(params(typs[1], typs[67], typs[3]), params(typs[3])) - typs[72] = newSig(params(typs[1], typs[67], typs[2]), params(typs[3])) - typs[73] = newSig(params(typs[1], typs[67], typs[3], typs[1]), params(typs[3])) - typs[74] = newSig(params(typs[1], typs[67], typs[3]), params(typs[3], typs[6])) - typs[75] = newSig(params(typs[1], typs[67], typs[2]), params(typs[3], typs[6])) - typs[76] = newSig(params(typs[1], typs[67], typs[3], typs[1]), params(typs[3], typs[6])) - typs[77] = newSig(params(typs[1], typs[67], typs[3]), nil) - typs[78] = newSig(params(typs[1], typs[67], typs[2]), nil) - typs[79] = newSig(params(typs[3]), nil) - typs[80] = newSig(params(typs[1], typs[67]), nil) - typs[81] = types.NewChan(typs[2], types.Cboth) - typs[82] = newSig(params(typs[1], typs[22]), params(typs[81])) - typs[83] = newSig(params(typs[1], typs[15]), params(typs[81])) - typs[84] = types.NewChan(typs[2], types.Crecv) - typs[85] = newSig(params(typs[84], typs[3]), nil) - typs[86] = newSig(params(typs[84], typs[3]), params(typs[6])) - typs[87] = types.NewChan(typs[2], types.Csend) - typs[88] = newSig(params(typs[87], typs[3]), nil) - typs[89] = types.NewArray(typs[0], 3) - typs[90] = types.NewStruct(types.NoPkg, []*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[89]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])}) - typs[91] = newSig(params(typs[1], typs[3], typs[3]), nil) - typs[92] = newSig(params(typs[1], typs[3]), nil) - typs[93] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15])) - typs[94] = newSig(params(typs[87], typs[3]), params(typs[6])) - typs[95] = newSig(params(typs[3], typs[84]), params(typs[6])) - typs[96] = types.NewPtr(typs[6]) - typs[97] = newSig(params(typs[3], typs[96], typs[84]), params(typs[6])) - typs[98] = newSig(params(typs[63]), nil) - typs[99] = newSig(params(typs[1], typs[1], typs[63], typs[15], typs[15], typs[6]), params(typs[15], typs[6])) - typs[100] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7])) - typs[101] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7])) - typs[102] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7])) - typs[103] = types.NewSlice(typs[2]) - typs[104] = newSig(params(typs[1], typs[103], typs[15]), params(typs[103])) - typs[105] = newSig(params(typs[3], typs[3], typs[5]), nil) - typs[106] = newSig(params(typs[7], typs[5]), nil) - typs[107] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) - typs[108] = newSig(params(typs[3], typs[3]), params(typs[6])) - typs[109] = newSig(params(typs[7], typs[7]), params(typs[6])) - typs[110] = newSig(params(typs[7], typs[5], typs[5]), params(typs[5])) - typs[111] = newSig(params(typs[7], typs[5]), params(typs[5])) - typs[112] = newSig(params(typs[22], typs[22]), params(typs[22])) - typs[113] = newSig(params(typs[24], typs[24]), params(typs[24])) - typs[114] = newSig(params(typs[20]), params(typs[22])) - typs[115] = newSig(params(typs[20]), params(typs[24])) - typs[116] = newSig(params(typs[20]), params(typs[65])) - typs[117] = newSig(params(typs[22]), params(typs[20])) - typs[118] = newSig(params(typs[24]), params(typs[20])) - typs[119] = newSig(params(typs[65]), params(typs[20])) - typs[120] = newSig(params(typs[26], typs[26]), params(typs[26])) - typs[121] = newSig(params(typs[5], typs[5]), nil) - typs[122] = 
newSig(params(typs[5], typs[5], typs[5]), nil) - typs[123] = newSig(params(typs[7], typs[1], typs[5]), nil) - typs[124] = types.NewSlice(typs[7]) - typs[125] = newSig(params(typs[7], typs[124]), nil) - typs[126] = types.Types[types.TUINT8] - typs[127] = newSig(params(typs[126], typs[126]), nil) - typs[128] = types.Types[types.TUINT16] - typs[129] = newSig(params(typs[128], typs[128]), nil) - typs[130] = newSig(params(typs[65], typs[65]), nil) - typs[131] = newSig(params(typs[24], typs[24]), nil) + typs[58] = types.Types[types.TUINT16] + typs[59] = newSig(params(typs[58]), params(typs[7])) + typs[60] = types.Types[types.TUINT32] + typs[61] = newSig(params(typs[60]), params(typs[7])) + typs[62] = newSig(params(typs[24]), params(typs[7])) + typs[63] = newSig(params(typs[28]), params(typs[7])) + typs[64] = types.Types[types.TUINT8] + typs[65] = types.NewSlice(typs[64]) + typs[66] = newSig(params(typs[65]), params(typs[7])) + typs[67] = newSig(params(typs[1], typs[3]), params(typs[2])) + typs[68] = newSig(params(typs[1], typs[1]), params(typs[1])) + typs[69] = newSig(params(typs[1], typs[1], typs[1]), nil) + typs[70] = newSig(params(typs[1]), nil) + typs[71] = types.NewPtr(typs[5]) + typs[72] = newSig(params(typs[71], typs[7], typs[7]), params(typs[6])) + typs[73] = newSig(nil, params(typs[60])) + typs[74] = types.NewMap(typs[2], typs[2]) + typs[75] = newSig(params(typs[1], typs[22], typs[3]), params(typs[74])) + typs[76] = newSig(params(typs[1], typs[15], typs[3]), params(typs[74])) + typs[77] = newSig(nil, params(typs[74])) + typs[78] = newSig(params(typs[1], typs[74], typs[3]), params(typs[3])) + typs[79] = newSig(params(typs[1], typs[74], typs[60]), params(typs[3])) + typs[80] = newSig(params(typs[1], typs[74], typs[24]), params(typs[3])) + typs[81] = newSig(params(typs[1], typs[74], typs[28]), params(typs[3])) + typs[82] = newSig(params(typs[1], typs[74], typs[3], typs[1]), params(typs[3])) + typs[83] = newSig(params(typs[1], typs[74], typs[3]), params(typs[3], typs[6])) + typs[84] = newSig(params(typs[1], typs[74], typs[60]), params(typs[3], typs[6])) + typs[85] = newSig(params(typs[1], typs[74], typs[24]), params(typs[3], typs[6])) + typs[86] = newSig(params(typs[1], typs[74], typs[28]), params(typs[3], typs[6])) + typs[87] = newSig(params(typs[1], typs[74], typs[3], typs[1]), params(typs[3], typs[6])) + typs[88] = newSig(params(typs[1], typs[74], typs[7]), params(typs[3])) + typs[89] = newSig(params(typs[1], typs[74], typs[3]), nil) + typs[90] = newSig(params(typs[1], typs[74], typs[60]), nil) + typs[91] = newSig(params(typs[1], typs[74], typs[24]), nil) + typs[92] = newSig(params(typs[1], typs[74], typs[28]), nil) + typs[93] = newSig(params(typs[3]), nil) + typs[94] = newSig(params(typs[1], typs[74]), nil) + typs[95] = types.NewChan(typs[2], types.Cboth) + typs[96] = newSig(params(typs[1], typs[22]), params(typs[95])) + typs[97] = newSig(params(typs[1], typs[15]), params(typs[95])) + typs[98] = types.NewChan(typs[2], types.Crecv) + typs[99] = newSig(params(typs[98], typs[3]), nil) + typs[100] = newSig(params(typs[98], typs[3]), params(typs[6])) + typs[101] = types.NewChan(typs[2], types.Csend) + typs[102] = newSig(params(typs[101], typs[3]), nil) + typs[103] = types.NewArray(typs[0], 3) + typs[104] = types.NewStruct(types.NoPkg, []*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[103]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, 
Lookup("alignme"), typs[24])}) + typs[105] = newSig(params(typs[1], typs[3], typs[3]), nil) + typs[106] = newSig(params(typs[1], typs[3]), nil) + typs[107] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15])) + typs[108] = newSig(params(typs[101], typs[3]), params(typs[6])) + typs[109] = newSig(params(typs[3], typs[98]), params(typs[6], typs[6])) + typs[110] = newSig(params(typs[71]), nil) + typs[111] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6])) + typs[112] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7])) + typs[113] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7])) + typs[114] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7])) + typs[115] = types.NewSlice(typs[2]) + typs[116] = newSig(params(typs[1], typs[115], typs[15]), params(typs[115])) + typs[117] = newSig(params(typs[1], typs[15]), nil) + typs[118] = newSig(params(typs[1], typs[22]), nil) + typs[119] = newSig(params(typs[3], typs[3], typs[5]), nil) + typs[120] = newSig(params(typs[7], typs[5]), nil) + typs[121] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) + typs[122] = newSig(params(typs[3], typs[3]), params(typs[6])) + typs[123] = newSig(params(typs[7], typs[7]), params(typs[6])) + typs[124] = newSig(params(typs[7], typs[5], typs[5]), params(typs[5])) + typs[125] = newSig(params(typs[7], typs[5]), params(typs[5])) + typs[126] = newSig(params(typs[22], typs[22]), params(typs[22])) + typs[127] = newSig(params(typs[24], typs[24]), params(typs[24])) + typs[128] = newSig(params(typs[20]), params(typs[22])) + typs[129] = newSig(params(typs[20]), params(typs[24])) + typs[130] = newSig(params(typs[20]), params(typs[60])) + typs[131] = newSig(params(typs[22]), params(typs[20])) + typs[132] = newSig(params(typs[24]), params(typs[20])) + typs[133] = newSig(params(typs[60]), params(typs[20])) + typs[134] = newSig(params(typs[26], typs[26]), params(typs[26])) + typs[135] = newSig(nil, params(typs[5])) + typs[136] = newSig(params(typs[5], typs[5]), nil) + typs[137] = newSig(params(typs[5], typs[5], typs[5]), nil) + typs[138] = newSig(params(typs[7], typs[1], typs[5]), nil) + typs[139] = types.NewSlice(typs[7]) + typs[140] = newSig(params(typs[7], typs[139]), nil) + typs[141] = newSig(params(typs[64], typs[64]), nil) + typs[142] = newSig(params(typs[58], typs[58]), nil) + typs[143] = newSig(params(typs[60], typs[60]), nil) + typs[144] = newSig(params(typs[24], typs[24]), nil) return typs[:] } diff --git a/src/cmd/compile/internal/typecheck/builtin/runtime.go b/src/cmd/compile/internal/typecheck/builtin/runtime.go index acb69c7b282..ebeaeae79ec 100644 --- a/src/cmd/compile/internal/typecheck/builtin/runtime.go +++ b/src/cmd/compile/internal/typecheck/builtin/runtime.go @@ -6,6 +6,7 @@ // to update builtin.go. This is not done automatically // to avoid depending on having a working compiler binary. +//go:build ignore // +build ignore package runtime @@ -45,6 +46,7 @@ func goPanicSlice3B(x int, y int) func goPanicSlice3BU(x uint, y int) func goPanicSlice3C(x int, y int) func goPanicSlice3CU(x uint, y int) +func goPanicSliceConvert(x int, y int) func printbool(bool) func printfloat(float64) @@ -86,11 +88,16 @@ func convI2I(typ *byte, elem any) (ret any) // Specialized type-to-interface conversion. // These return only a data pointer. 
-func convT16(val any) unsafe.Pointer // val must be uint16-like (same size and alignment as a uint16) -func convT32(val any) unsafe.Pointer // val must be uint32-like (same size and alignment as a uint32) -func convT64(val any) unsafe.Pointer // val must be uint64-like (same size and alignment as a uint64 and contains no pointers) -func convTstring(val any) unsafe.Pointer // val must be a string -func convTslice(val any) unsafe.Pointer // val must be a slice +// These functions take concrete types in the runtime. But they may +// be used for a wider range of types, which have the same memory +// layout as the parameter type. The compiler converts the +// to-be-converted type to the parameter type before calling the +// runtime function. This way, the call is ABI-insensitive. +func convT16(val uint16) unsafe.Pointer +func convT32(val uint32) unsafe.Pointer +func convT64(val uint64) unsafe.Pointer +func convTstring(val string) unsafe.Pointer +func convTslice(val []uint8) unsafe.Pointer // Type to empty-interface conversion. func convT2E(typ *byte, elem *any) (ret any) @@ -101,10 +108,10 @@ func convT2I(tab *byte, elem *any) (ret any) func convT2Inoptr(tab *byte, elem *any) (ret any) // interface type assertions x.(T) -func assertE2I(typ *byte, iface any) (ret any) -func assertE2I2(typ *byte, iface any) (ret any, b bool) -func assertI2I(typ *byte, iface any) (ret any) -func assertI2I2(typ *byte, iface any) (ret any, b bool) +func assertE2I(inter *byte, typ *byte) *byte +func assertE2I2(inter *byte, eface any) (ret any) +func assertI2I(inter *byte, tab *byte) *byte +func assertI2I2(inter *byte, iface any) (ret any) func panicdottypeE(have, want, iface *byte) func panicdottypeI(have, want, iface *byte) func panicnildottype(want *byte) @@ -121,26 +128,26 @@ func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any) func makemap(mapType *byte, hint int, mapbuf *any) (hmap map[any]any) func makemap_small() (hmap map[any]any) func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any) -func mapaccess1_fast32(mapType *byte, hmap map[any]any, key any) (val *any) -func mapaccess1_fast64(mapType *byte, hmap map[any]any, key any) (val *any) -func mapaccess1_faststr(mapType *byte, hmap map[any]any, key any) (val *any) +func mapaccess1_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any) +func mapaccess1_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any) +func mapaccess1_faststr(mapType *byte, hmap map[any]any, key string) (val *any) func mapaccess1_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any) func mapaccess2(mapType *byte, hmap map[any]any, key *any) (val *any, pres bool) -func mapaccess2_fast32(mapType *byte, hmap map[any]any, key any) (val *any, pres bool) -func mapaccess2_fast64(mapType *byte, hmap map[any]any, key any) (val *any, pres bool) -func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pres bool) +func mapaccess2_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any, pres bool) +func mapaccess2_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any, pres bool) +func mapaccess2_faststr(mapType *byte, hmap map[any]any, key string) (val *any, pres bool) func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool) func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any) -func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any) -func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any) -func 
mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any) -func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any) -func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any) +func mapassign_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any) +func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any) +func mapassign_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any) +func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any) +func mapassign_faststr(mapType *byte, hmap map[any]any, key string) (val *any) func mapiterinit(mapType *byte, hmap map[any]any, hiter *any) func mapdelete(mapType *byte, hmap map[any]any, key *any) -func mapdelete_fast32(mapType *byte, hmap map[any]any, key any) -func mapdelete_fast64(mapType *byte, hmap map[any]any, key any) -func mapdelete_faststr(mapType *byte, hmap map[any]any, key any) +func mapdelete_fast32(mapType *byte, hmap map[any]any, key uint32) +func mapdelete_fast64(mapType *byte, hmap map[any]any, key uint64) +func mapdelete_faststr(mapType *byte, hmap map[any]any, key string) func mapiternext(hiter *any) func mapclear(mapType *byte, hmap map[any]any) @@ -166,8 +173,7 @@ func typedmemclr(typ *byte, dst *any) func typedslicecopy(typ *byte, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int func selectnbsend(hchan chan<- any, elem *any) bool -func selectnbrecv(elem *any, hchan <-chan any) bool -func selectnbrecv2(elem *any, received *bool, hchan <-chan any) bool +func selectnbrecv(elem *any, hchan <-chan any) (bool, bool) func selectsetpc(pc *uintptr) func selectgo(cas0 *byte, order0 *byte, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool) @@ -177,6 +183,9 @@ func makeslice(typ *byte, len int, cap int) unsafe.Pointer func makeslice64(typ *byte, len int64, cap int64) unsafe.Pointer func makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer func growslice(typ *byte, old []any, cap int) (ary []any) +func unsafeslice(typ *byte, len int) +func unsafeslice64(typ *byte, len int64) + func memmove(to *any, frm *any, length uintptr) func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) func memclrHasPointers(ptr unsafe.Pointer, n uintptr) @@ -225,9 +234,11 @@ func uint32tofloat64(uint32) float64 func complex128div(num complex128, den complex128) (quo complex128) +func getcallerpc() uintptr +func getcallersp() uintptr + // race detection func racefuncenter(uintptr) -func racefuncenterfp() func racefuncexit() func raceread(uintptr) func racewrite(uintptr) diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go index c60d36ba62f..5a35eeade9f 100644 --- a/src/cmd/compile/internal/typecheck/const.go +++ b/src/cmd/compile/internal/typecheck/const.go @@ -760,7 +760,9 @@ func anyCallOrChan(n ir.Node) bool { ir.OPRINTN, ir.OREAL, ir.ORECOVER, - ir.ORECV: + ir.ORECV, + ir.OUNSAFEADD, + ir.OUNSAFESLICE: return true } return false @@ -794,7 +796,7 @@ func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) { } } - if !ir.IsConstNode(n) { + if !ir.IsConstNode(n) || n.Type() == nil { return } if n.Type().IsUntyped() { diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 339fb00aa44..24d141e8a2c 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -48,7 +48,7 @@ func tcAddr(n *ir.AddrExpr) ir.Node { } func tcShift(n, l, r ir.Node) 
(ir.Node, ir.Node, *types.Type) { - if l.Type() == nil || l.Type() == nil { + if l.Type() == nil || r.Type() == nil { return l, r, nil } @@ -77,6 +77,10 @@ func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) { return l, r, t } +func IsCmp(op ir.Op) bool { + return iscmp[op] +} + // tcArith typechecks operands of a binary arithmetic expression. // The result of tcArith MUST be assigned back to original operands, // t is the type of the expression, and should be set by the caller. e.g: @@ -102,7 +106,7 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) // The conversion allocates, so only do it if the concrete type is huge. converted := false if r.Type().Kind() != types.TBLANK { - aop, _ = assignop(l.Type(), r.Type()) + aop, _ = Assignop(l.Type(), r.Type()) if aop != ir.OXXX { if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type())) @@ -121,7 +125,7 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) } if !converted && l.Type().Kind() != types.TBLANK { - aop, _ = assignop(r.Type(), l.Type()) + aop, _ = Assignop(r.Type(), l.Type()) if aop != ir.OXXX { if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) { base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type())) @@ -362,9 +366,9 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) { } l := l.(*ir.StructKeyExpr) - f := lookdot1(nil, l.Field, t, t.Fields(), 0) + f := Lookdot1(nil, l.Field, t, t.Fields(), 0) if f == nil { - if ci := lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. + if ci := Lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. if visible(ci.Sym) { base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym) } else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion. @@ -415,7 +419,7 @@ func tcConv(n *ir.ConvExpr) ir.Node { n.SetType(nil) return n } - op, why := convertop(n.X.Op() == ir.OLITERAL, t, n.Type()) + op, why := Convertop(n.X.Op() == ir.OLITERAL, t, n.Type()) if op == ir.OXXX { if !n.Diag() && !n.Type().Broke() && !n.X.Diag() { base.Errorf("cannot convert %L to type %v%s", n.X, n.Type(), why) @@ -492,7 +496,7 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node { return n } - if lookdot(n, t, 0) == nil { + if Lookdot(n, t, 0) == nil { // Legitimate field or method lookup failed, try to explain the error switch { case t.IsEmptyInterface(): @@ -502,12 +506,12 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node { // Pointer to interface is almost always a mistake. base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type()) - case lookdot(n, t, 1) != nil: + case Lookdot(n, t, 1) != nil: // Field or method matches by name, but it is not exported. base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel) default: - if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup. + if mt := Lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup. 
base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym) } else { base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel) diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 6e2354c2810..f381e1dbdc4 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -105,9 +105,13 @@ func PartialCallType(n *ir.SelectorExpr) *types.Type { // typechecking an inline body, as opposed to the body of a real function. var inTypeCheckInl bool -// Lazy typechecking of imported bodies. For local functions, CanInline will set ->typecheck -// because they're a copy of an already checked body. +// ImportedBody returns immediately if the inlining information for fn is +// populated. Otherwise, fn must be an imported function. If so, ImportedBody +// loads in the dcls and body for fn, and typechecks as needed. func ImportedBody(fn *ir.Func) { + if fn.Inl.Body != nil { + return + } lno := ir.SetPos(fn.Nname) // When we load an inlined body, we need to allow OADDR @@ -141,23 +145,18 @@ func ImportedBody(fn *ir.Func) { fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.Nodes(fn.Inl.Body)) } - savefn := ir.CurFunc - ir.CurFunc = fn - if inTypeCheckInl { - base.Fatalf("inTypeCheckInl should not be set recursively") + if !go117ExportTypes { + // If we didn't export & import types, typecheck the code here. + savefn := ir.CurFunc + ir.CurFunc = fn + if inTypeCheckInl { + base.Fatalf("inTypeCheckInl should not be set recursively") + } + inTypeCheckInl = true + Stmts(fn.Inl.Body) + inTypeCheckInl = false + ir.CurFunc = savefn } - inTypeCheckInl = true - Stmts(fn.Inl.Body) - inTypeCheckInl = false - ir.CurFunc = savefn - - // During ImportBody (which imports fn.Func.Inl.Body), - // declarations are added to fn.Func.Dcl by funcBody(). Move them - // to fn.Func.Inl.Dcl for consistency with how local functions - // behave. (Append because ImportedBody may be called multiple - // times on same fn.) - fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...) - fn.Dcl = nil base.Pos = lno } @@ -182,7 +181,7 @@ func fnpkg(fn *ir.Name) *types.Pkg { return fn.Sym().Pkg } -// closurename generates a new unique name for a closure within +// ClosureName generates a new unique name for a closure within // outerfunc. func ClosureName(outerfunc *ir.Func) *types.Sym { outer := "glob." @@ -313,7 +312,7 @@ func tcClosure(clo *ir.ClosureExpr, top int) { return } - // Don't give a name and add to xtop if we are typechecking an inlined + // Don't give a name and add to Target.Decls if we are typechecking an inlined // body in ImportedBody(), since we only want to create the named function // when the closure is actually inlined (and then we force a typecheck // explicitly in (*inlsubst).node()). 
@@ -359,7 +358,7 @@ func tcClosure(clo *ir.ClosureExpr, top int) { ir.Dump(s, fn) } if !inTypeCheckInl { - // Add function to xtop once only when we give it a name + // Add function to Target.Decls once only when we give it a name Target.Decls = append(Target.Decls, fn) } } @@ -432,7 +431,7 @@ func tcCall(n *ir.CallExpr, top int) ir.Node { u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init - case ir.OCOMPLEX, ir.OCOPY: + case ir.OCOMPLEX, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE: typecheckargs(n) arg1, arg2, ok := needTwoArgs(n) if !ok { @@ -464,7 +463,7 @@ func tcCall(n *ir.CallExpr, top int) ir.Node { n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg) n.SetType(l.Type()) - return typecheck1(n, top) + return tcConv(n) } typecheckargs(n) @@ -979,3 +978,39 @@ func tcRecover(n *ir.CallExpr) ir.Node { n.SetType(types.Types[types.TINTER]) return n } + +// tcUnsafeAdd typechecks an OUNSAFEADD node. +func tcUnsafeAdd(n *ir.BinaryExpr) *ir.BinaryExpr { + n.X = AssignConv(Expr(n.X), types.Types[types.TUNSAFEPTR], "argument to unsafe.Add") + n.Y = DefaultLit(Expr(n.Y), types.Types[types.TINT]) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } + if !n.Y.Type().IsInteger() { + n.SetType(nil) + return n + } + n.SetType(n.X.Type()) + return n +} + +// tcUnsafeSlice typechecks an OUNSAFESLICE node. +func tcUnsafeSlice(n *ir.BinaryExpr) *ir.BinaryExpr { + n.X = Expr(n.X) + n.Y = Expr(n.Y) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } + t := n.X.Type() + if !t.IsPtr() { + base.Errorf("first argument to unsafe.Slice must be pointer; have %L", t) + } + if !checkunsafeslice(&n.Y) { + n.SetType(nil) + return n + } + n.SetType(types.NewSlice(t.Elem())) + return n +} diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go index 6fab74e61fe..64d68ef6255 100644 --- a/src/cmd/compile/internal/typecheck/iexport.go +++ b/src/cmd/compile/internal/typecheck/iexport.go @@ -246,6 +246,11 @@ const ( interfaceType ) +const ( + debug = false + magic = 0x6742937dc293105 +) + func WriteExports(out *bufio.Writer) { p := iexporter{ allPkgs: map[*types.Pkg]bool{}, @@ -598,9 +603,9 @@ func (w *exportWriter) pkg(pkg *types.Pkg) { w.string(pkg.Path) } -func (w *exportWriter) qualifiedIdent(n ir.Node) { +func (w *exportWriter) qualifiedIdent(n *ir.Name) { // Ensure any referenced declarations are written out too. - w.p.pushDecl(n.Name()) + w.p.pushDecl(n) s := n.Sym() w.string(s.Name) @@ -612,6 +617,11 @@ func (w *exportWriter) selector(s *types.Sym) { base.Fatalf("missing currPkg") } + // If the selector being written is unexported, it comes with a package qualifier. + // If the selector being written is exported, it is not package-qualified. + // See the spec: https://golang.org/ref/spec#Uniqueness_of_identifiers + // As an optimization, we don't actually write the package every time - instead we + // call setPkg before a group of selectors (all of which must have the same package qualifier). pkg := w.currPkg if types.IsExported(s.Name) { pkg = types.LocalPkg @@ -627,6 +637,142 @@ func (w *exportWriter) typ(t *types.Type) { w.data.uint64(w.p.typOff(t)) } +// The "exotic" functions in this section encode a wider range of +// items than the standard encoding functions above. These include +// types that do not appear in declarations, only in code, such as +// method types. 
These methods need to be separate from the standard +// encoding functions because we don't want to modify the encoding +// generated by the standard functions (because that exported +// information is read by tools besides the compiler). + +// exoticType exports a type to the writer. +func (w *exportWriter) exoticType(t *types.Type) { + switch { + case t == nil: + // Calls-as-statements have no type. + w.data.uint64(exoticTypeNil) + case t.IsStruct() && t.StructType().Funarg != types.FunargNone: + // These are weird structs for representing tuples of types returned + // by multi-return functions. + // They don't fit the standard struct type mold. For instance, + // they don't have any package info. + w.data.uint64(exoticTypeTuple) + w.uint64(uint64(t.StructType().Funarg)) + w.uint64(uint64(t.NumFields())) + for _, f := range t.FieldSlice() { + w.pos(f.Pos) + s := f.Sym + if s == nil { + w.uint64(0) + } else if s.Pkg == nil { + w.uint64(exoticTypeSymNoPkg) + w.string(s.Name) + } else { + w.uint64(exoticTypeSymWithPkg) + w.pkg(s.Pkg) + w.string(s.Name) + } + w.typ(f.Type) + if f.Embedded != 0 || f.Note != "" { + panic("extra info in funarg struct field") + } + } + case t.Kind() == types.TFUNC && t.Recv() != nil: + w.data.uint64(exoticTypeRecv) + // interface method types have a fake receiver type. + isFakeRecv := t.Recv().Type == types.FakeRecvType() + w.bool(isFakeRecv) + if !isFakeRecv { + w.exoticParam(t.Recv()) + } + w.exoticSignature(t) + + default: + // A regular type. + w.data.uint64(exoticTypeRegular) + w.typ(t) + } +} + +const ( + exoticTypeNil = iota + exoticTypeTuple + exoticTypeRecv + exoticTypeRegular +) +const ( + exoticTypeSymNil = iota + exoticTypeSymNoPkg + exoticTypeSymWithPkg +) + +// Export a selector, but one whose package may not match +// the package being compiled. This is a separate function +// because the standard selector() serialization format is fixed +// by the go/types reader. This one can only be used during +// inline/generic body exporting. +func (w *exportWriter) exoticSelector(s *types.Sym) { + pkg := w.currPkg + if types.IsExported(s.Name) { + pkg = types.LocalPkg + } + + w.string(s.Name) + if s.Pkg == pkg { + w.uint64(0) + } else { + w.uint64(1) + w.pkg(s.Pkg) + } +} + +func (w *exportWriter) exoticSignature(t *types.Type) { + hasPkg := t.Pkg() != nil + w.bool(hasPkg) + if hasPkg { + w.pkg(t.Pkg()) + } + w.exoticParamList(t.Params().FieldSlice()) + w.exoticParamList(t.Results().FieldSlice()) +} + +func (w *exportWriter) exoticParamList(fs []*types.Field) { + w.uint64(uint64(len(fs))) + for _, f := range fs { + w.exoticParam(f) + } + +} +func (w *exportWriter) exoticParam(f *types.Field) { + w.pos(f.Pos) + w.exoticSym(f.Sym) + w.uint64(uint64(f.Offset)) + w.exoticType(f.Type) + w.bool(f.IsDDD()) +} + +func (w *exportWriter) exoticField(f *types.Field) { + w.pos(f.Pos) + w.exoticSym(f.Sym) + w.uint64(uint64(f.Offset)) + w.exoticType(f.Type) + w.string(f.Note) +} + +func (w *exportWriter) exoticSym(s *types.Sym) { + if s == nil { + w.string("") + return + } + if s.Name == "" { + base.Fatalf("empty symbol name") + } + w.string(s.Name) + if !types.IsExported(s.Name) { + w.pkg(s.Pkg) + } +} + func (p *iexporter) newWriter() *exportWriter { return &exportWriter{p: p} } @@ -1025,7 +1171,11 @@ func (w *exportWriter) funcExt(n *ir.Name) { w.linkname(n.Sym()) w.symIdx(n.Sym()) - // TODO remove after register abi is working. + // Record definition ABI so cross-ABI calls can be direct. 
+ // This is important for the performance of calling some + // common functions implemented in assembly (e.g., bytealg). + w.uint64(uint64(n.Func.ABI)) + w.uint64(uint64(n.Func.Pragma)) // Escape analysis. @@ -1099,6 +1249,7 @@ func (w *exportWriter) writeNames(dcl []*ir.Name) { } func (w *exportWriter) funcBody(fn *ir.Func) { + //fmt.Printf("Exporting %s\n", fn.Nname.Sym().Name) w.writeNames(fn.Inl.Dcl) w.stmtList(fn.Inl.Body) @@ -1174,7 +1325,11 @@ func (w *exportWriter) stmt(n ir.Node) { case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: n := n.(*ir.AssignListStmt) - w.op(ir.OAS2) + if go117ExportTypes { + w.op(n.Op()) + } else { + w.op(ir.OAS2) + } w.pos(n.Pos()) w.exprList(n.Lhs) w.exprList(n.Rhs) @@ -1186,7 +1341,7 @@ func (w *exportWriter) stmt(n ir.Node) { w.exprList(n.Results) // case ORETJMP: - // unreachable - generated by compiler for trampolin routines + // unreachable - generated by compiler for trampoline routines case ir.OGO, ir.ODEFER: n := n.(*ir.GoDeferStmt) @@ -1294,21 +1449,6 @@ func simplifyForExport(n ir.Node) ir.Node { case ir.OPAREN: n := n.(*ir.ParenExpr) return simplifyForExport(n.X) - case ir.ODEREF: - n := n.(*ir.StarExpr) - if n.Implicit() { - return simplifyForExport(n.X) - } - case ir.OADDR: - n := n.(*ir.AddrExpr) - if n.Implicit() { - return simplifyForExport(n.X) - } - case ir.ODOT, ir.ODOTPTR: - n := n.(*ir.SelectorExpr) - if n.Implicit() { - return simplifyForExport(n.X) - } } return n } @@ -1338,10 +1478,15 @@ func (w *exportWriter) expr(n ir.Node) { if (n.Class == ir.PEXTERN || n.Class == ir.PFUNC) && !ir.IsBlank(n) { w.op(ir.ONONAME) w.qualifiedIdent(n) + if go117ExportTypes { + w.typ(n.Type()) + } break } // Function scope name. + // We don't need a type here, as the type will be provided at the + // declaration of n. w.op(ir.ONAME) w.localName(n) @@ -1399,9 +1544,16 @@ func (w *exportWriter) expr(n ir.Node) { case ir.OPTRLIT: n := n.(*ir.AddrExpr) - w.op(ir.OADDR) + if go117ExportTypes { + w.op(ir.OPTRLIT) + } else { + w.op(ir.OADDR) + } w.pos(n.Pos()) w.expr(n.X) + if go117ExportTypes { + w.typ(n.Type()) + } case ir.OSTRUCTLIT: n := n.(*ir.CompLitExpr) @@ -1412,11 +1564,17 @@ func (w *exportWriter) expr(n ir.Node) { case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: n := n.(*ir.CompLitExpr) - w.op(ir.OCOMPLIT) + if go117ExportTypes { + w.op(n.Op()) + } else { + w.op(ir.OCOMPLIT) + } w.pos(n.Pos()) w.typ(n.Type()) w.exprList(n.List) - + if go117ExportTypes && n.Op() == ir.OSLICELIT { + w.uint64(uint64(n.Len)) + } case ir.OKEY: n := n.(*ir.KeyExpr) w.op(ir.OKEY) @@ -1429,52 +1587,103 @@ func (w *exportWriter) expr(n ir.Node) { case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR: n := n.(*ir.SelectorExpr) - w.op(ir.OXDOT) + if go117ExportTypes { + if n.Op() == ir.OXDOT { + base.Fatalf("shouldn't encounter XDOT in new exporter") + } + w.op(n.Op()) + } else { + w.op(ir.OXDOT) + } w.pos(n.Pos()) w.expr(n.X) - w.selector(n.Sel) + w.exoticSelector(n.Sel) + if go117ExportTypes { + w.exoticType(n.Type()) + if n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR || n.Op() == ir.ODOTINTER { + w.exoticField(n.Selection) + } + // n.Selection is not required for OMETHEXPR, ODOTMETH, and OCALLPART. It will + // be reconstructed during import. 
+ } case ir.ODOTTYPE, ir.ODOTTYPE2: n := n.(*ir.TypeAssertExpr) - w.op(ir.ODOTTYPE) + if go117ExportTypes { + w.op(n.Op()) + } else { + w.op(ir.ODOTTYPE) + } w.pos(n.Pos()) w.expr(n.X) w.typ(n.Type()) case ir.OINDEX, ir.OINDEXMAP: n := n.(*ir.IndexExpr) - w.op(ir.OINDEX) + if go117ExportTypes { + w.op(n.Op()) + } else { + w.op(ir.OINDEX) + } w.pos(n.Pos()) w.expr(n.X) w.expr(n.Index) + if go117ExportTypes { + w.typ(n.Type()) + if n.Op() == ir.OINDEXMAP { + w.bool(n.Assigned) + } + } case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR: n := n.(*ir.SliceExpr) - w.op(ir.OSLICE) + if go117ExportTypes { + w.op(n.Op()) + } else { + w.op(ir.OSLICE) + } w.pos(n.Pos()) w.expr(n.X) w.exprsOrNil(n.Low, n.High) + if go117ExportTypes { + w.typ(n.Type()) + } case ir.OSLICE3, ir.OSLICE3ARR: n := n.(*ir.SliceExpr) - w.op(ir.OSLICE3) + if go117ExportTypes { + w.op(n.Op()) + } else { + w.op(ir.OSLICE3) + } w.pos(n.Pos()) w.expr(n.X) w.exprsOrNil(n.Low, n.High) w.expr(n.Max) + if go117ExportTypes { + w.typ(n.Type()) + } - case ir.OCOPY, ir.OCOMPLEX: + case ir.OCOPY, ir.OCOMPLEX, ir.OUNSAFEADD, ir.OUNSAFESLICE: // treated like other builtin calls (see e.g., OREAL) n := n.(*ir.BinaryExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.X) w.expr(n.Y) - w.op(ir.OEND) + if go117ExportTypes { + w.typ(n.Type()) + } else { + w.op(ir.OEND) + } - case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR: + case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR: n := n.(*ir.ConvExpr) - w.op(ir.OCONV) + if go117ExportTypes { + w.op(n.Op()) + } else { + w.op(ir.OCONV) + } w.pos(n.Pos()) w.typ(n.Type()) w.expr(n.X) @@ -1484,7 +1693,13 @@ func (w *exportWriter) expr(n ir.Node) { w.op(n.Op()) w.pos(n.Pos()) w.expr(n.X) - w.op(ir.OEND) + if go117ExportTypes { + if n.Op() != ir.OPANIC { + w.typ(n.Type()) + } + } else { + w.op(ir.OEND) + } case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: n := n.(*ir.CallExpr) @@ -1497,15 +1712,28 @@ func (w *exportWriter) expr(n ir.Node) { } else if n.IsDDD { base.Fatalf("exporter: unexpected '...' with %v call", n.Op()) } + if go117ExportTypes { + if n.Op() != ir.ODELETE && n.Op() != ir.OPRINT && n.Op() != ir.OPRINTN { + w.typ(n.Type()) + } + } case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG: n := n.(*ir.CallExpr) - w.op(ir.OCALL) + if go117ExportTypes { + w.op(n.Op()) + } else { + w.op(ir.OCALL) + } w.pos(n.Pos()) w.stmtList(n.Init()) w.expr(n.X) w.exprList(n.Args) w.bool(n.IsDDD) + if go117ExportTypes { + w.exoticType(n.Type()) + w.uint64(uint64(n.Use)) + } case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: n := n.(*ir.MakeExpr) @@ -1521,6 +1749,12 @@ func (w *exportWriter) expr(n ir.Node) { w.expr(n.Cap) w.op(ir.OEND) case n.Len != nil && (n.Op() == ir.OMAKESLICE || !n.Len.Type().IsUntyped()): + // Note: the extra conditional exists because make(T) for + // T a map or chan type, gets an untyped zero added as + // an argument. Don't serialize that argument here. 
+ w.expr(n.Len) + w.op(ir.OEND) + case n.Len != nil && go117ExportTypes: w.expr(n.Len) w.op(ir.OEND) } @@ -1531,18 +1765,27 @@ func (w *exportWriter) expr(n ir.Node) { w.op(n.Op()) w.pos(n.Pos()) w.expr(n.X) + if go117ExportTypes { + w.typ(n.Type()) + } case ir.OADDR: n := n.(*ir.AddrExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.X) + if go117ExportTypes { + w.typ(n.Type()) + } case ir.ODEREF: n := n.(*ir.StarExpr) w.op(n.Op()) w.pos(n.Pos()) w.expr(n.X) + if go117ExportTypes { + w.typ(n.Type()) + } case ir.OSEND: n := n.(*ir.SendStmt) @@ -1559,6 +1802,9 @@ func (w *exportWriter) expr(n ir.Node) { w.pos(n.Pos()) w.expr(n.X) w.expr(n.Y) + if go117ExportTypes { + w.typ(n.Type()) + } case ir.OANDAND, ir.OOROR: n := n.(*ir.LogicalExpr) @@ -1566,12 +1812,18 @@ func (w *exportWriter) expr(n ir.Node) { w.pos(n.Pos()) w.expr(n.X) w.expr(n.Y) + if go117ExportTypes { + w.typ(n.Type()) + } case ir.OADDSTR: n := n.(*ir.AddStringExpr) w.op(ir.OADDSTR) w.pos(n.Pos()) w.exprList(n.List) + if go117ExportTypes { + w.typ(n.Type()) + } case ir.ODCLCONST: // if exporting, DCLCONST should just be removed as its usage @@ -1584,6 +1836,9 @@ func (w *exportWriter) expr(n ir.Node) { } func (w *exportWriter) op(op ir.Op) { + if debug { + w.uint64(magic) + } w.uint64(uint64(op)) } @@ -1611,6 +1866,9 @@ func (w *exportWriter) fieldList(list ir.Nodes) { w.pos(n.Pos()) w.selector(n.Field) w.expr(n.Value) + if go117ExportTypes { + w.uint64(uint64(n.Offset)) + } } } @@ -1671,3 +1929,13 @@ func (w *intWriter) uint64(x uint64) { n := binary.PutUvarint(buf[:], x) w.Write(buf[:n]) } + +// If go117ExportTypes is true, then we write type information when +// exporting function bodies, so those function bodies don't need to +// be re-typechecked on import. +// This flag adds some other info to the serialized stream as well +// which was previously recomputed during typechecking, like +// specializing opcodes (e.g. OXDOT to ODOTPTR) and ancillary +// information (e.g. length field for OSLICELIT). +const go117ExportTypes = true +const Go117ExportTypes = go117ExportTypes diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go index 7b5b113b158..a5ddbb5a74c 100644 --- a/src/cmd/compile/internal/typecheck/iimport.go +++ b/src/cmd/compile/internal/typecheck/iimport.go @@ -42,6 +42,9 @@ var ( inlineImporter = map[*types.Sym]iimporterAndOffset{} ) +// expandDecl returns immediately if n is already a Name node. Otherwise, n should +// be an Ident node, and expandDecl reads in the definition of the specified +// identifier from the appropriate package. func expandDecl(n ir.Node) ir.Node { if n, ok := n.(*ir.Name); ok { return n @@ -61,9 +64,11 @@ func expandDecl(n ir.Node) ir.Node { return r.doDecl(n.Sym()) } +// ImportBody reads in the dcls and body of an imported function (which should not +// yet have been read in). 
func ImportBody(fn *ir.Func) { if fn.Inl.Body != nil { - return + base.Fatalf("%v already has inline body", fn) } r := importReaderFor(fn.Nname.Sym(), inlineImporter) @@ -501,6 +506,129 @@ func (r *importReader) typ() *types.Type { return r.p.typAt(r.uint64()) } +func (r *importReader) exoticType() *types.Type { + switch r.uint64() { + case exoticTypeNil: + return nil + case exoticTypeTuple: + funarg := types.Funarg(r.uint64()) + n := r.uint64() + fs := make([]*types.Field, n) + for i := range fs { + pos := r.pos() + var sym *types.Sym + switch r.uint64() { + case exoticTypeSymNil: + sym = nil + case exoticTypeSymNoPkg: + sym = types.NoPkg.Lookup(r.string()) + case exoticTypeSymWithPkg: + pkg := r.pkg() + sym = pkg.Lookup(r.string()) + default: + base.Fatalf("unknown symbol kind") + } + typ := r.typ() + f := types.NewField(pos, sym, typ) + fs[i] = f + } + t := types.NewStruct(types.NoPkg, fs) + t.StructType().Funarg = funarg + return t + case exoticTypeRecv: + var rcvr *types.Field + if r.bool() { // isFakeRecv + rcvr = fakeRecvField() + } else { + rcvr = r.exoticParam() + } + return r.exoticSignature(rcvr) + case exoticTypeRegular: + return r.typ() + default: + base.Fatalf("bad kind of call type") + return nil + } +} + +func (r *importReader) exoticSelector() *types.Sym { + name := r.string() + if name == "" { + return nil + } + pkg := r.currPkg + if types.IsExported(name) { + pkg = types.LocalPkg + } + if r.uint64() != 0 { + pkg = r.pkg() + } + return pkg.Lookup(name) +} + +func (r *importReader) exoticSignature(recv *types.Field) *types.Type { + var pkg *types.Pkg + if r.bool() { // hasPkg + pkg = r.pkg() + } + params := r.exoticParamList() + results := r.exoticParamList() + return types.NewSignature(pkg, recv, nil, params, results) +} + +func (r *importReader) exoticParamList() []*types.Field { + n := r.uint64() + fs := make([]*types.Field, n) + for i := range fs { + fs[i] = r.exoticParam() + } + return fs +} + +func (r *importReader) exoticParam() *types.Field { + pos := r.pos() + sym := r.exoticSym() + off := r.uint64() + typ := r.exoticType() + ddd := r.bool() + f := types.NewField(pos, sym, typ) + f.Offset = int64(off) + if sym != nil { + f.Nname = ir.NewNameAt(pos, sym) + } + f.SetIsDDD(ddd) + return f +} + +func (r *importReader) exoticField() *types.Field { + pos := r.pos() + sym := r.exoticSym() + off := r.uint64() + typ := r.exoticType() + note := r.string() + f := types.NewField(pos, sym, typ) + f.Offset = int64(off) + if sym != nil { + f.Nname = ir.NewNameAt(pos, sym) + } + f.Note = note + return f +} + +func (r *importReader) exoticSym() *types.Sym { + name := r.string() + if name == "" { + return nil + } + var pkg *types.Pkg + if types.IsExported(name) { + pkg = types.LocalPkg + } else { + pkg = r.pkg() + } + return pkg.Lookup(name) +} + func (p *iimporter) typAt(off uint64) *types.Type { t, ok := p.typCache[off] if !ok { @@ -508,6 +636,12 @@ base.Fatalf("predeclared type missing from cache: %d", off) } t = p.newReader(off-predeclReserved, nil).typ1() + // Ensure size is calculated for imported types. Since CL 283313, the compiler + // does not compile functions immediately when it sees them. Instead, functions + // are pushed onto a compile queue and later drained from it for compiling. + // During this process, size calculation is disabled, so it is no longer safe to + // calculate sizes during SSA generation. See issue #44732. 
+ types.CheckSize(t) p.typCache[off] = t } return t @@ -673,7 +807,8 @@ func (r *importReader) funcExt(n *ir.Name) { r.linkname(n.Sym()) r.symIdx(n.Sym()) - // TODO remove after register abi is working + n.Func.ABI = obj.ABI(r.uint64()) + n.SetPragma(ir.PragmaFlag(r.uint64())) // Escape analysis. @@ -747,7 +882,7 @@ func (r *importReader) doInline(fn *ir.Func) { base.Fatalf("%v already has inline body", fn) } - //fmt.Printf("Importing %v\n", n) + //fmt.Printf("Importing %s\n", fn.Nname.Sym().Name) r.funcBody(fn) importlist = append(importlist, fn) @@ -793,6 +928,11 @@ func (r *importReader) funcBody(fn *ir.Func) { // functions). body = []ir.Node{} } + if go117ExportTypes { + ir.VisitList(body, func(n ir.Node) { + n.SetTypecheck(1) + }) + } fn.Inl.Body = body r.curfn = outerfn @@ -920,7 +1060,8 @@ func (r *importReader) expr() ir.Node { // TODO(gri) split into expr and stmt func (r *importReader) node() ir.Node { - switch op := r.op(); op { + op := r.op() + switch op { // expressions // case OPAREN: // unreachable - unpacked by exporter @@ -942,7 +1083,16 @@ func (r *importReader) node() ir.Node { return n case ir.ONONAME: - return r.qualifiedIdent() + n := r.qualifiedIdent() + if go117ExportTypes { + n2 := Resolve(n) + typ := r.typ() + if n2.Type() == nil { + n2.SetType(typ) + } + return n2 + } + return n case ir.ONAME: return r.localName() @@ -982,79 +1132,174 @@ func (r *importReader) node() ir.Node { cvars := make([]*ir.Name, r.int64()) for i := range cvars { cvars[i] = ir.CaptureName(r.pos(), fn, r.localName().Canonical()) + if go117ExportTypes { + if cvars[i].Type() != nil || cvars[i].Defn == nil { + base.Fatalf("bad import of closure variable") + } + // Closure variable should have Defn set, which is its captured + // variable, and it gets the same type as the captured variable. + cvars[i].SetType(cvars[i].Defn.Type()) + } } fn.ClosureVars = cvars r.allClosureVars = append(r.allClosureVars, cvars...) - fn.Dcl = r.readFuncDcls(fn) - body := r.stmtList() + fn.Inl = &ir.Inline{} + // Read in the Dcls and Body of the closure after temporarily + // setting r.curfn to fn. + r.funcBody(fn) + fn.Dcl = fn.Inl.Dcl + fn.Body = fn.Inl.Body + if len(fn.Body) == 0 { + // An empty closure must be represented as a single empty + // block statement, else it will be dropped. 
+ fn.Body = []ir.Node{ir.NewBlockStmt(src.NoXPos, nil)} + } + fn.Inl = nil + ir.FinishCaptureNames(pos, r.curfn, fn) clo := ir.NewClosureExpr(pos, fn) fn.OClosure = clo - - fn.Body = body + if go117ExportTypes { + clo.SetType(typ) + } return clo - // case OPTRLIT: - // unreachable - mapped to case OADDR below by exporter - case ir.OSTRUCTLIT: + if go117ExportTypes { + pos := r.pos() + typ := r.typ() + list := r.fieldList() + n := ir.NewCompLitExpr(pos, ir.OSTRUCTLIT, nil, list) + n.SetType(typ) + return n + } return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()), r.fieldList()) - // case OARRAYLIT, OSLICELIT, OMAPLIT: - // unreachable - mapped to case OCOMPLIT below by exporter - case ir.OCOMPLIT: return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()), r.exprList()) + case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT: + if !go117ExportTypes { + // unreachable - mapped to OCOMPLIT by exporter + goto error + } + pos := r.pos() + typ := r.typ() + list := r.exprList() + n := ir.NewCompLitExpr(pos, op, ir.TypeNode(typ), list) + n.SetType(typ) + if op == ir.OSLICELIT { + n.Len = int64(r.uint64()) + } + return n + case ir.OKEY: return ir.NewKeyExpr(r.pos(), r.expr(), r.expr()) // case OSTRUCTKEY: // unreachable - handled in case OSTRUCTLIT by elemList - // case OCALLPART: - // unreachable - mapped to case OXDOT below by exporter - - // case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: - // unreachable - mapped to case OXDOT below by exporter - case ir.OXDOT: // see parser.new_dotname - return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.selector()) + if go117ExportTypes { + base.Fatalf("shouldn't encounter XDOT in new importer") + } + return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.exoticSelector()) - // case ODOTTYPE, ODOTTYPE2: - // unreachable - mapped to case ODOTTYPE below by exporter - - case ir.ODOTTYPE: - n := ir.NewTypeAssertExpr(r.pos(), r.expr(), nil) - n.SetType(r.typ()) + case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR: + if !go117ExportTypes { + // unreachable - mapped to case OXDOT by exporter + goto error + } + pos := r.pos() + expr := r.expr() + sel := r.exoticSelector() + n := ir.NewSelectorExpr(pos, op, expr, sel) + n.SetType(r.exoticType()) + switch op { + case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER: + n.Selection = r.exoticField() + case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR: + // These require a Lookup to link to the correct declaration. + rcvrType := expr.Type() + typ := n.Type() + n.Selection = Lookdot(n, rcvrType, 1) + if op == ir.OCALLPART || op == ir.OMETHEXPR { + // Lookdot clobbers the opcode and type, undo that. 
+ n.SetOp(op) + n.SetType(typ) + } + } return n - // case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: - // unreachable - mapped to cases below by exporter + case ir.ODOTTYPE, ir.ODOTTYPE2: + n := ir.NewTypeAssertExpr(r.pos(), r.expr(), nil) + n.SetType(r.typ()) + if go117ExportTypes { + n.SetOp(op) + } + return n - case ir.OINDEX: - return ir.NewIndexExpr(r.pos(), r.expr(), r.expr()) + case ir.OINDEX, ir.OINDEXMAP: + n := ir.NewIndexExpr(r.pos(), r.expr(), r.expr()) + if go117ExportTypes { + n.SetOp(op) + n.SetType(r.typ()) + if op == ir.OINDEXMAP { + n.Assigned = r.bool() + } + } + return n - case ir.OSLICE, ir.OSLICE3: + case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR: pos, x := r.pos(), r.expr() low, high := r.exprsOrNil() var max ir.Node if op.IsSlice3() { max = r.expr() } - return ir.NewSliceExpr(pos, op, x, low, high, max) + n := ir.NewSliceExpr(pos, op, x, low, high, max) + if go117ExportTypes { + n.SetType(r.typ()) + } + return n - // case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR: - // unreachable - mapped to OCONV case below by exporter + case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR: + if !go117ExportTypes && op != ir.OCONV { + // unreachable - mapped to OCONV case by exporter + goto error + } + return ir.NewConvExpr(r.pos(), op, r.typ(), r.expr()) - case ir.OCONV: - return ir.NewConvExpr(r.pos(), ir.OCONV, r.typ(), r.expr()) - - case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: + case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN, ir.OUNSAFEADD, ir.OUNSAFESLICE: + if go117ExportTypes { + switch op { + case ir.OCOPY, ir.OCOMPLEX, ir.OUNSAFEADD, ir.OUNSAFESLICE: + n := ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr()) + n.SetType(r.typ()) + return n + case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC: + n := ir.NewUnaryExpr(r.pos(), op, r.expr()) + if op != ir.OPANIC { + n.SetType(r.typ()) + } + return n + case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN: + n := ir.NewCallExpr(r.pos(), op, nil, r.exprList()) + if op == ir.OAPPEND { + n.IsDDD = r.bool() + } + if op == ir.OAPPEND || op == ir.ORECOVER { + n.SetType(r.typ()) + } + return n + } + // ir.OMAKE + goto error + } n := builtinCall(r.pos(), op) n.Args = r.exprList() if op == ir.OAPPEND { @@ -1062,18 +1307,37 @@ func (r *importReader) node() ir.Node { } return n - // case OCALLFUNC, OCALLMETH, OCALLINTER, OGETG: - // unreachable - mapped to OCALL case below by exporter - - case ir.OCALL: + case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG: pos := r.pos() init := r.stmtList() n := ir.NewCallExpr(pos, ir.OCALL, r.expr(), r.exprList()) + if go117ExportTypes { + n.SetOp(op) + } *n.PtrInit() = init n.IsDDD = r.bool() + if go117ExportTypes { + n.SetType(r.exoticType()) + n.Use = ir.CallUse(r.uint64()) + } return n case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE: + if go117ExportTypes { + pos := r.pos() + typ := r.typ() + list := r.exprList() + var len_, cap_ ir.Node + if len(list) > 0 { + len_ = list[0] + } + if len(list) > 1 { + cap_ = list[1] + } + n := ir.NewMakeExpr(pos, op, len_, cap_) + n.SetType(typ) + return n + } n := builtinCall(r.pos(), ir.OMAKE) 
n.Args.Append(ir.TypeNode(r.typ())) n.Args.Append(r.exprList()...) @@ -1081,21 +1345,42 @@ func (r *importReader) node() ir.Node { // unary expressions case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV: - return ir.NewUnaryExpr(r.pos(), op, r.expr()) + n := ir.NewUnaryExpr(r.pos(), op, r.expr()) + if go117ExportTypes { + n.SetType(r.typ()) + } + return n - case ir.OADDR: - return NodAddrAt(r.pos(), r.expr()) + case ir.OADDR, ir.OPTRLIT: + n := NodAddrAt(r.pos(), r.expr()) + if go117ExportTypes { + n.SetOp(op) + n.SetType(r.typ()) + } + return n case ir.ODEREF: - return ir.NewStarExpr(r.pos(), r.expr()) + n := ir.NewStarExpr(r.pos(), r.expr()) + if go117ExportTypes { + n.SetType(r.typ()) + } + return n // binary expressions case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR: - return ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr()) + n := ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr()) + if go117ExportTypes { + n.SetType(r.typ()) + } + return n case ir.OANDAND, ir.OOROR: - return ir.NewLogicalExpr(r.pos(), op, r.expr(), r.expr()) + n := ir.NewLogicalExpr(r.pos(), op, r.expr(), r.expr()) + if go117ExportTypes { + n.SetType(r.typ()) + } + return n case ir.OSEND: return ir.NewSendStmt(r.pos(), r.expr(), r.expr()) @@ -1103,6 +1388,11 @@ func (r *importReader) node() ir.Node { case ir.OADDSTR: pos := r.pos() list := r.exprList() + if go117ExportTypes { + n := ir.NewAddStringExpr(pos, list) + n.SetType(r.typ()) + return n + } x := list[0] for _, y := range list[1:] { x = ir.NewBinaryExpr(pos, ir.OADD, x, y) @@ -1118,8 +1408,8 @@ func (r *importReader) node() ir.Node { stmts.Append(ir.NewAssignStmt(n.Pos(), n, nil)) return ir.NewBlockStmt(n.Pos(), stmts) - // case OAS, OASWB: - // unreachable - mapped to OAS case below by exporter + // case OASWB: + // unreachable - never exported case ir.OAS: return ir.NewAssignStmt(r.pos(), r.expr(), r.expr()) @@ -1134,11 +1424,12 @@ func (r *importReader) node() ir.Node { } return n - // case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: - // unreachable - mapped to OAS2 case below by exporter - - case ir.OAS2: - return ir.NewAssignListStmt(r.pos(), ir.OAS2, r.exprList(), r.exprList()) + case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: + if !go117ExportTypes && op != ir.OAS2 { + // unreachable - mapped to case OAS2 by exporter + goto error + } + return ir.NewAssignListStmt(r.pos(), op, r.exprList(), r.exprList()) case ir.ORETURN: return ir.NewReturnStmt(r.pos(), r.exprList()) @@ -1210,16 +1501,27 @@ func (r *importReader) node() ir.Node { "\t==> please file an issue and assign to gri@", op, int(op)) panic("unreachable") // satisfy compiler } +error: + base.Fatalf("cannot import %v (%d) node\n"+ + "\t==> please file an issue and assign to khr@", op, int(op)) + panic("unreachable") // satisfy compiler } func (r *importReader) op() ir.Op { + if debug && r.uint64() != magic { + base.Fatalf("import stream has desynchronized") + } return ir.Op(r.uint64()) } func (r *importReader) fieldList() []ir.Node { list := make([]ir.Node, r.uint64()) for i := range list { - list[i] = ir.NewStructKeyExpr(r.pos(), r.selector(), r.expr()) + x := ir.NewStructKeyExpr(r.pos(), r.selector(), r.expr()) + if go117ExportTypes { + x.Offset = int64(r.uint64()) + } + list[i] = x } return list } @@ -1236,5 +1538,9 @@ func (r *importReader) exprsOrNil() (a, b ir.Node) { } func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr { + if go117ExportTypes { + // These should 
all be encoded as direct ops, not OCALL. + base.Fatalf("builtinCall should not be invoked when types are included in import/export") + } return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil) } diff --git a/src/cmd/compile/internal/typecheck/mapfile_mmap.go b/src/cmd/compile/internal/typecheck/mapfile_mmap.go index 2f3aa16decf..298b385bcb0 100644 --- a/src/cmd/compile/internal/typecheck/mapfile_mmap.go +++ b/src/cmd/compile/internal/typecheck/mapfile_mmap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd // +build darwin dragonfly freebsd linux netbsd openbsd package typecheck diff --git a/src/cmd/compile/internal/typecheck/mapfile_read.go b/src/cmd/compile/internal/typecheck/mapfile_read.go index 4059f261d49..9637ab97abe 100644 --- a/src/cmd/compile/internal/typecheck/mapfile_read.go +++ b/src/cmd/compile/internal/typecheck/mapfile_read.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd package typecheck diff --git a/src/cmd/compile/internal/typecheck/mkbuiltin.go b/src/cmd/compile/internal/typecheck/mkbuiltin.go index bef510a578a..6dbd1869b3e 100644 --- a/src/cmd/compile/internal/typecheck/mkbuiltin.go +++ b/src/cmd/compile/internal/typecheck/mkbuiltin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore // Generate builtin.go from builtin/runtime.go.
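The three mapfile/mkbuiltin hunks above are part of the mechanical migration to the //go:build syntax introduced in Go 1.17: each file gains the new boolean form while keeping the legacy // +build line, and the two must agree (gofmt keeps them synchronized). In the legacy form a comma means AND, a space means OR, and multiple // +build lines AND together; the new form spells the logic out. For example, a hypothetical file restricted to linux/amd64 or any darwin build would carry both:

//go:build (linux && amd64) || darwin
// +build linux,amd64 darwin

package demo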
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index 14ed175be9c..175216f279c 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -74,7 +74,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) { if ir.DeclaredBy(nn, n) { nn.SetType(t) } else if nn.Type() != nil { - if op, why := assignop(t, nn.Type()); op == ir.OXXX { + if op, why := Assignop(t, nn.Type()); op == ir.OXXX { base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t, nn, why) } } @@ -519,8 +519,8 @@ func tcSwitchExpr(n *ir.SwitchStmt) { } else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) { base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1) } else { - op1, _ := assignop(n1.Type(), t) - op2, _ := assignop(t, n1.Type()) + op1, _ := Assignop(n1.Type(), t) + op2, _ := Assignop(t, n1.Type()) if op1 == ir.OXXX && op2 == ir.OXXX { if n.Tag != nil { base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t) diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go index b88a9f22839..9ee7a94b1f2 100644 --- a/src/cmd/compile/internal/typecheck/subr.go +++ b/src/cmd/compile/internal/typecheck/subr.go @@ -106,7 +106,8 @@ var DirtyAddrtaken = false func ComputeAddrtaken(top []ir.Node) { for _, n := range top { - ir.Visit(n, func(n ir.Node) { + var doVisit func(n ir.Node) + doVisit = func(n ir.Node) { if n.Op() == ir.OADDR { if x := ir.OuterValue(n.(*ir.AddrExpr).X); x.Op() == ir.ONAME { x.Name().SetAddrtaken(true) @@ -117,7 +118,11 @@ func ComputeAddrtaken(top []ir.Node) { } } } - }) + if n.Op() == ir.OCLOSURE { + ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doVisit) + } + } + ir.Visit(n, doVisit) } } @@ -216,7 +221,7 @@ func CalcMethods(t *types.Type) { ms = append(ms, t.Methods().Slice()...) sort.Sort(types.MethodsByName(ms)) - t.AllMethods().Set(ms) + t.SetAllMethods(ms) } // adddot1 returns the number of fields or methods named s at depth d in Type t. @@ -252,7 +257,13 @@ func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase return c, false } - for _, f := range u.Fields().Slice() { + var fields *types.Fields + if u.IsStruct() { + fields = u.Fields() + } else { + fields = u.AllMethods() + } + for _, f := range fields.Slice() { if f.Embedded == 0 || f.Sym == nil { continue } @@ -312,7 +323,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { return n } - op, why := assignop(n.Type(), t) + op, why := Assignop(n.Type(), t) if op == ir.OXXX { base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why) op = ir.OCONV @@ -328,7 +339,7 @@ func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { // If so, return op code to use in conversion. // If not, return OXXX. In this case, the string return parameter may // hold a reason why. In all other cases, it'll be the empty string. -func assignop(src, dst *types.Type) (ir.Op, string) { +func Assignop(src, dst *types.Type) (ir.Op, string) { if src == dst { return ir.OCONVNOP, "" } @@ -455,7 +466,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) { // If not, return OXXX. In this case, the string return parameter may // hold a reason why. In all other cases, it'll be the empty string. // srcConstant indicates whether the value of type src is a constant. 
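The ComputeAddrtaken change above swaps an anonymous callback for a named function variable so the visitor can re-invoke itself on closure bodies, which the plain ir.Visit walk does not descend into. The Go idiom is to declare the variable before assigning the closure, so the closure body can refer to itself; a self-contained sketch with a toy node type:

package main

import "fmt"

type node struct {
    name     string
    children []*node
    body     []*node // analogue of a closure's separately stored Body
}

func main() {
    leaf := &node{name: "&x"}
    clo := &node{name: "closure", body: []*node{leaf}}
    root := &node{name: "root", children: []*node{clo}}

    // Declare before assigning so the closure can call itself and
    // descend into body lists that a plain child walk would miss.
    var doVisit func(*node)
    doVisit = func(n *node) {
        fmt.Println("visit", n.name)
        for _, c := range n.children {
            doVisit(c)
        }
        for _, b := range n.body {
            doVisit(b)
        }
    }
    doVisit(root)
}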
-func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { +func Convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { if src == dst { return ir.OCONVNOP, "" } @@ -478,7 +489,7 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { } // 1. src can be assigned to dst. - op, why := assignop(src, dst) + op, why := Assignop(src, dst) if op != ir.OXXX { return op, why } @@ -563,7 +574,7 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { return ir.OCONVNOP, "" } - // src is map and dst is a pointer to corresponding hmap. + // 10. src is map and dst is a pointer to corresponding hmap. // This rule is needed for the implementation detail that // go gc maps are implemented as a pointer to a hmap struct. if src.Kind() == types.TMAP && dst.IsPtr() && @@ -571,6 +582,16 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) { return ir.OCONVNOP, "" } + // 11. src is a slice and dst is a pointer-to-array. + // They must have same element type. + if src.IsSlice() && dst.IsPtr() && dst.Elem().IsArray() && + types.Identical(src.Elem(), dst.Elem().Elem()) { + if !types.AllowsGoVersion(curpkg(), 1, 17) { + return ir.OXXX, ":\n\tconversion of slices to array pointers only supported as of -lang=go1.17" + } + return ir.OSLICE2ARRPTR, "" + } + return ir.OXXX, "" } @@ -614,7 +635,7 @@ func expand0(t *types.Type) { } if u.IsInterface() { - for _, f := range u.Fields().Slice() { + for _, f := range u.AllMethods().Slice() { if f.Sym.Uniq() { continue } @@ -653,7 +674,13 @@ func expand1(t *types.Type, top bool) { } if u.IsStruct() || u.IsInterface() { - for _, f := range u.Fields().Slice() { + var fields *types.Fields + if u.IsStruct() { + fields = u.Fields() + } else { + fields = u.AllMethods() + } + for _, f := range fields.Slice() { if f.Embedded == 0 { continue } @@ -703,8 +730,8 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool if t.IsInterface() { i := 0 - tms := t.Fields().Slice() - for _, im := range iface.Fields().Slice() { + tms := t.AllMethods().Slice() + for _, im := range iface.AllMethods().Slice() { for i < len(tms) && tms[i].Sym != im.Sym { i++ } @@ -733,7 +760,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool tms = t.AllMethods().Slice() } i := 0 - for _, im := range iface.Fields().Slice() { + for _, im := range iface.AllMethods().Slice() { if im.Broke() { continue } @@ -801,7 +828,13 @@ func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) c := 0 if u.IsStruct() || u.IsInterface() { - for _, f := range u.Fields().Slice() { + var fields *types.Fields + if u.IsStruct() { + fields = u.Fields() + } else { + fields = u.AllMethods() + } + for _, f := range fields.Slice() { if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) { if save != nil { *save = f diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go index 202a932e6c9..f29af82db2c 100644 --- a/src/cmd/compile/internal/typecheck/syms.go +++ b/src/cmd/compile/internal/typecheck/syms.go @@ -32,6 +32,7 @@ func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { n := ir.NewNameAt(old.Pos(), old.Sym()) n.Class = old.Class n.SetType(types.SubstAny(old.Type(), &types_)) + n.Func = old.Func if len(types_) > 0 { base.Fatalf("SubstArgTypes: too many argument types") } diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go index 
cb434578dd3..95f7b50259f 100644 --- a/src/cmd/compile/internal/typecheck/typecheck.go +++ b/src/cmd/compile/internal/typecheck/typecheck.go @@ -66,6 +66,8 @@ func FuncBody(n *ir.Func) { var importlist []*ir.Func +// AllImportedBodies reads in the bodies of all imported functions and typechecks +// them, if needed. func AllImportedBodies() { for _, n := range importlist { if n.Inl != nil { @@ -297,7 +299,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) { // Skip typecheck if already done. // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed. - if n.Typecheck() == 1 { + if n.Typecheck() == 1 || n.Typecheck() == 3 { switch n.Op() { case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK: break @@ -433,8 +435,8 @@ func typecheck(n ir.Node, top int) (res ir.Node) { case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil: if !n.Type().Broke() { base.Errorf("type %v is not an expression", n.Type()) + n.SetDiag(true) } - n.SetType(nil) case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil: if !n.Diag() { @@ -446,7 +448,11 @@ func typecheck(n ir.Node, top int) (res ir.Node) { case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME): base.Errorf("%v is not a type", n) if t != nil { - n.SetType(nil) + if n.Op() == ir.ONAME { + t.SetBroke(true) + } else { + n.SetType(nil) + } } } @@ -482,7 +488,9 @@ func typecheck1(n ir.Node, top int) ir.Node { case ir.OLITERAL: if n.Sym() == nil && n.Type() == nil { - base.Fatalf("literal missing type: %v", n) + if !n.Diag() { + base.Fatalf("literal missing type: %v", n) + } } return n @@ -528,7 +536,7 @@ func typecheck1(n ir.Node, top int) ir.Node { case ir.OPACK: n := n.(*ir.PkgName) base.Errorf("use of package %v without selector", n.Sym()) - n.SetType(nil) + n.SetDiag(true) return n // types (ODEREF is with exprs) @@ -590,6 +598,10 @@ func typecheck1(n ir.Node, top int) ir.Node { case ir.OANDAND, ir.OOROR: n := n.(*ir.LogicalExpr) n.X, n.Y = Expr(n.X), Expr(n.Y) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } // For "x == x && len(s)", it's better to report that "len(s)" (type int) // can't be used with "&&" than to report that "x == x" (type untyped bool) // can't be converted to int (see issue #41500). @@ -765,6 +777,14 @@ func typecheck1(n ir.Node, top int) ir.Node { n := n.(*ir.CallExpr) return tcRecover(n) + case ir.OUNSAFEADD: + n := n.(*ir.BinaryExpr) + return tcUnsafeAdd(n) + + case ir.OUNSAFESLICE: + n := n.(*ir.BinaryExpr) + return tcUnsafeSlice(n) + case ir.OCLOSURE: n := n.(*ir.ClosureExpr) tcClosure(n, top) @@ -876,7 +896,7 @@ func typecheck1(n ir.Node, top int) ir.Node { case ir.OTYPESW: n := n.(*ir.TypeSwitchGuard) base.Errorf("use of .(type) outside type switch") - n.SetType(nil) + n.SetDiag(true) return n case ir.ODCLFUNC: @@ -1052,7 +1072,11 @@ func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) { return n.Args[0], n.Args[1], true } -func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { +// Lookdot1 looks up the specified method s in the list fs of methods, returning +// the matching field or nil. If dostrcmp is 0, it matches the symbols. If +// dostrcmp is 1, it matches by name exactly. If dostrcmp is 2, it matches names +// with case folding. 
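Rule 11, added to Convertop in the subr.go hunk above, implements the Go 1.17 language change behind the new OSLICE2ARRPTR op: a slice may be converted to a pointer to an array with an identical element type, gated on -lang=go1.17. At the source level (assuming a Go 1.17+ toolchain) the conversion shares the slice's backing array and panics if the slice is too short:

package main

import "fmt"

func main() {
    s := []int{1, 2, 3, 4}
    p := (*[4]int)(s) // shares s's backing array; requires len(s) >= 4
    p[0] = 99
    fmt.Println(s[0]) // 99

    // (*[8]int)(s) would panic at run time: the slice is too short.
    // With -lang=go1.16 or earlier the conversion is a compile error,
    // matching the message produced by the rule above.
}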
+func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field { var r *types.Field for _, f := range fs.Slice() { if dostrcmp != 0 && f.Sym.Name == s.Name { @@ -1093,7 +1117,7 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { // Compute the method set for t. var ms *types.Fields if t.IsInterface() { - ms = t.Fields() + ms = t.AllMethods() } else { mt := types.ReceiverBaseType(t) if mt == nil { @@ -1117,9 +1141,9 @@ func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) { } s := n.Sel - m := lookdot1(n, s, t, ms, 0) + m := Lookdot1(n, s, t, ms, 0) if m == nil { - if lookdot1(n, s, t, ms, 1) != nil { + if Lookdot1(n, s, t, ms, 1) != nil { base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s) } else if _, ambig := dotpath(s, t, nil, false); ambig { base.Errorf("%v undefined (ambiguous selector)", n) // method or field @@ -1149,20 +1173,28 @@ func derefall(t *types.Type) *types.Type { return t } -func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { +// Lookdot looks up field or method n.Sel in the type t and returns the matching +// field. It transforms the op of node n to ODOTINTER or ODOTMETH, if appropriate. +// It also may add a StarExpr node to n.X as needed for access to non-pointer +// methods. If dostrcmp is 0, it matches the field/method with the exact symbol +// as n.Sel (appropriate for exported fields). If dostrcmp is 1, it matches by name +// exactly. If dostrcmp is 2, it matches names with case folding. +func Lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { s := n.Sel types.CalcSize(t) var f1 *types.Field - if t.IsStruct() || t.IsInterface() { - f1 = lookdot1(n, s, t, t.Fields(), dostrcmp) + if t.IsStruct() { + f1 = Lookdot1(n, s, t, t.Fields(), dostrcmp) + } else if t.IsInterface() { + f1 = Lookdot1(n, s, t, t.AllMethods(), dostrcmp) } var f2 *types.Field if n.X.Type() == t || n.X.Type().Sym() == nil { mt := types.ReceiverBaseType(t) if mt != nil { - f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp) + f2 = Lookdot1(n, s, mt, mt.Methods(), dostrcmp) } } @@ -1175,7 +1207,7 @@ func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { base.Errorf("%v is both field and method", n.Sel) } if f1.Offset == types.BADWIDTH { - base.Fatalf("lookdot badwidth %v %p", f1, f1) + base.Fatalf("Lookdot badwidth t=%v, f1=%v@%p", t, f1, f1) } n.Selection = f1 n.SetType(f1.Type) @@ -1300,6 +1332,9 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i n1 := tstruct.NumFields() n2 := len(nl) if !hasddd(tstruct) { + if isddd { + goto invalidddd + } if n2 > n1 { goto toomany } @@ -1365,6 +1400,8 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i if i < len(nl) { goto toomany } + +invalidddd: if isddd { if call != nil { base.Errorf("invalid use of ... 
in call to %v", call) @@ -1612,6 +1649,10 @@ func checkassign(stmt ir.Node, n ir.Node) { return } + defer n.SetType(nil) + if n.Diag() { + return + } switch { case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).X.Op() == ir.OINDEXMAP: base.Errorf("cannot assign to struct field %v in map", n) @@ -1622,13 +1663,6 @@ func checkassign(stmt ir.Node, n ir.Node) { default: base.Errorf("cannot assign to %v", n) } - n.SetType(nil) -} - -func checkassignlist(stmt ir.Node, l ir.Nodes) { - for _, n := range l { - checkassign(stmt, n) - } } func checkassignto(src *types.Type, dst ir.Node) { @@ -1637,7 +1671,7 @@ func checkassignto(src *types.Type, dst ir.Node) { return } - if op, why := assignop(src, dst.Type()); op == ir.OXXX { + if op, why := Assignop(src, dst.Type()); op == ir.OXXX { base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why) return } @@ -1915,6 +1949,35 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool { return true } +// checkunsafeslice is like checkmake but for unsafe.Slice. +func checkunsafeslice(np *ir.Node) bool { + n := *np + if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { + base.Errorf("non-integer len argument in unsafe.Slice - %v", n.Type()) + return false + } + + // Do range checks for constants before DefaultLit + // to avoid redundant "constant NNN overflows int" errors. + if n.Op() == ir.OLITERAL { + v := toint(n.Val()) + if constant.Sign(v) < 0 { + base.Errorf("negative len argument in unsafe.Slice") + return false + } + if ir.ConstOverflow(v, types.Types[types.TINT]) { + base.Errorf("len argument too large in unsafe.Slice") + return false + } + } + + // DefaultLit is necessary for non-constants too: n might be 1.1< 0 { o = Rnd(o, int64(f.Type.Align)) } - f.Offset = o - if f.Nname != nil { - // addrescapes has similar code to update these offsets. - // Usually addrescapes runs after calcStructOffset, - // in which case we could drop this, - // but function closure functions are the exception. - // NOTE(rsc): This comment may be stale. - // It's possible the ordering has changed and this is - // now the common case. I'm not sure. - f.Nname.(VarObject).RecordFrameOffset(o) + if isStruct { // For receiver/args/results, do not set, it depends on ABI + f.Offset = o } w := f.Type.Width @@ -624,9 +616,11 @@ func PtrDataSize(t *Type) int64 { case TSTRUCT: // Find the last field that has pointers. 
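checkunsafeslice above performs for unsafe.Slice the same early constant range checks that checkmake does for make: reject negative or overflowing constant lengths before DefaultLit, so the user sees one precise error instead of a redundant overflow message. At the source level (Go 1.17+), the accepted and rejected forms look like:

package main

import (
    "fmt"
    "unsafe"
)

func main() {
    arr := [4]int{1, 2, 3, 4}

    // unsafe.Slice(ptr, len) yields a []int of length and capacity 4
    // backed by arr.
    s := unsafe.Slice(&arr[0], len(arr))
    fmt.Println(s) // [1 2 3 4]

    // Rejected at compile time by the checks above:
    //   unsafe.Slice(&arr[0], -1)    // negative len argument
    //   unsafe.Slice(&arr[0], 1<<99) // len argument too large
    //   unsafe.Slice(&arr[0], 1.5)   // non-integer len argument
}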
var lastPtrField *Field - for _, t1 := range t.Fields().Slice() { - if t1.Type.HasPointers() { - lastPtrField = t1 + fs := t.Fields().Slice() + for i := len(fs) - 1; i >= 0; i-- { + if fs[i].Type.HasPointers() { + lastPtrField = fs[i] + break } } return lastPtrField.Offset + PtrDataSize(lastPtrField.Type) diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go index f80de937bef..70289387421 100644 --- a/src/cmd/compile/internal/types/sizeof_test.go +++ b/src/cmd/compile/internal/types/sizeof_test.go @@ -21,12 +21,12 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {Sym{}, 44, 72}, - {Type{}, 68, 120}, + {Type{}, 60, 104}, {Map{}, 20, 40}, {Forward{}, 20, 32}, {Func{}, 28, 48}, {Struct{}, 16, 32}, - {Interface{}, 8, 16}, + {Interface{}, 4, 8}, {Chan{}, 8, 16}, {Array{}, 12, 16}, {FuncArgs{}, 4, 8}, diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index 0e66ed348bf..534cf7e2376 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -32,8 +32,12 @@ type Sym struct { Pkg *Pkg Name string // object name - // saved and restored by dcopy - Def Object // definition: ONAME OTYPE OPACK or OLITERAL + // Def, Block, and Lastlineno are saved and restored by Pushdcl/Popdcl. + + // The unique ONAME, OTYPE, OPACK, or OLITERAL node that this symbol is + // bound to within the current scope. (Most parts of the compiler should + // prefer passing the Node directly, rather than relying on this field.) + Def Object Block int32 // blocknumber to catch redeclaration Lastlineno src.XPos // last declaration for diagnostic @@ -45,7 +49,7 @@ const ( symUniq symSiggen // type symbol has been generated symAsm // on asmlist, for writing to -asmhdr - symFunc // function symbol; uses internal ABI + symFunc // function symbol ) func (sym *Sym) OnExportList() bool { return sym.flags&symOnExportList != 0 } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index b6374e49a51..1a9aa6916a2 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -11,9 +11,9 @@ import ( "sync" ) -// IRNode represents an ir.Node, but without needing to import cmd/compile/internal/ir, +// Object represents an ir.Node, but without needing to import cmd/compile/internal/ir, // which would cause an import cycle. The uses in other packages must type assert -// values of type IRNode to ir.Node or a more specific type. +// values of type Object to ir.Node or a more specific type. type Object interface { Pos() src.XPos Sym() *Sym @@ -157,11 +157,15 @@ type Type struct { // Width is the width of this Type in bytes. Width int64 // valid if Align > 0 - methods Fields + // list of base methods (excluding embedding) + methods Fields + // list of all methods (including embedding) allMethods Fields - nod Object // canonical OTYPE node - underlying *Type // original type (type literal or predefined type) + // canonical OTYPE node for a named type (should be an ir.Name node with same sym) + nod Object + // the underlying type (type literal or predeclared type) for a defined type + underlying *Type // Cache of composite types, with this type being the element type. cache struct { @@ -177,10 +181,13 @@ type Type struct { flags bitset8 - // Type params (in order) of this named type that need to be instantiated. 
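The PtrDataSize hunk above replaces a forward scan that remembers the last matching field with a backward scan that stops at the first hit, since only the last pointer-bearing field matters. The same micro-pattern in isolation, with a hypothetical predicate helper:

package main

import "fmt"

// lastIndex walks backward and stops at the first hit, exactly the
// replacement for "walk forward and remember the last match".
func lastIndex(xs []int, pred func(int) bool) int {
    for i := len(xs) - 1; i >= 0; i-- {
        if pred(xs[i]) {
            return i
        }
    }
    return -1
}

func main() {
    hasPtrs := []int{1, 0, 1, 0, 0} // 1 marks a pointer-bearing field
    fmt.Println(lastIndex(hasPtrs, func(x int) bool { return x == 1 })) // 2
}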
- // TODO(danscales): for space reasons, should probably be a pointer to a - // slice, possibly change the name of this field. - RParams []*Type + // For defined (named) generic types, a pointer to the list of type params + // (in order) of this type that need to be instantiated. For + // fully-instantiated generic types, this is the targs used to instantiate + // them (which are used when generating the corresponding instantiated + // methods). rparams is only set for named types that are generic or are + // fully-instantiated from a generic type, and is otherwise set to nil. + rparams *[]*Type } func (*Type) CanBeAnSSAAux() {} @@ -236,6 +243,32 @@ func (t *Type) Pos() src.XPos { return src.NoXPos } +func (t *Type) RParams() []*Type { + if t.rparams == nil { + return nil + } + return *t.rparams +} + +func (t *Type) SetRParams(rparams []*Type) { + if len(rparams) == 0 { + base.Fatalf("Setting nil or zero-length rparams") + } + t.rparams = &rparams + if t.HasTParam() { + return + } + // HasTParam should be set if any rparam is or has a type param. This is + // to handle the case of a generic type which doesn't reference any of its + // type params (e.g. most commonly, an empty struct). + for _, rparam := range rparams { + if rparam.HasTParam() { + t.SetHasTParam(true) + break + } + } +} + // NoPkg is a nil *Pkg value for clarity. // It's intended for use when constructing types that aren't exported // and thus don't need to be associated with any package. @@ -341,8 +374,7 @@ func (t *Type) StructType() *Struct { // Interface contains Type fields specific to interface types. type Interface struct { - Fields Fields - pkg *Pkg + pkg *Pkg } // Ptr contains Type fields specific to pointer types. @@ -394,8 +426,11 @@ type Slice struct { Elem *Type // element type } -// A Field represents a field in a struct or a method in an interface or -// associated with a named type. +// A Field is a (Sym, Type) pairing along with some other information, and, +// depending on the context, is used to represent: +// - a field in a struct +// - a method in an interface or associated with a named type +// - a function parameter type Field struct { flags bitset8 @@ -411,7 +446,8 @@ type Field struct { Nname Object // Offset in bytes of this field or method within its enclosing struct - // or interface Type. + // or interface Type. Exception: if field is function receiver, arg or + // result, then this is BOGUS_FUNARG_OFFSET; types does not know the Abi. Offset int64 } @@ -581,12 +617,19 @@ func NewTuple(t1, t2 *Type) *Type { return t } -func NewResults(types []*Type) *Type { +func newResults(types []*Type) *Type { t := New(TRESULTS) t.Extra.(*Results).Types = types return t } +func NewResults(types []*Type) *Type { + if len(types) == 1 && types[0] == TypeMem { + return TypeResultMem + } + return newResults(types) +} + func newSSA(name string) *Type { t := New(TSSA) t.Extra = name @@ -887,40 +930,49 @@ func (t *Type) IsFuncArgStruct() bool { return t.kind == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone } +// Methods returns a pointer to the base methods (excluding embedding) for type t. +// These can either be concrete methods (for non-interface types) or interface +// methods (for interface types). func (t *Type) Methods() *Fields { - // TODO(mdempsky): Validate t? return &t.methods } +// AllMethods returns a pointer to all the methods (including embedding) for type t. +// For an interface type, this is the set of methods that are typically iterated over. 
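Turning the type-parameter list into a *[]*Type, as the rparams change above does, shrinks every Type value in the overwhelmingly common case where the field is nil: a pointer is one word where a slice header is three. A nil-safe accessor keeps callers oblivious to the indirection; a sketch of the same trick with a toy type:

package main

import "fmt"

// typ stores its rarely-populated rparams list behind a pointer,
// mirroring Type.rparams.
type typ struct {
    rparams *[]*typ
}

func (t *typ) RParams() []*typ {
    if t.rparams == nil {
        return nil
    }
    return *t.rparams
}

func (t *typ) SetRParams(rp []*typ) {
    if len(rp) == 0 {
        panic("setting nil or zero-length rparams")
    }
    t.rparams = &rp
}

func main() {
    var t typ
    fmt.Println(len(t.RParams())) // 0, with no allocation behind it
    t.SetRParams([]*typ{{}})
    fmt.Println(len(t.RParams())) // 1
}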
func (t *Type) AllMethods() *Fields { - // TODO(mdempsky): Validate t? + if t.kind == TINTER { + // Calculate the full method set of an interface type on the fly + // now, if not done yet. + CalcSize(t) + } return &t.allMethods } -func (t *Type) Fields() *Fields { - switch t.kind { - case TSTRUCT: - return &t.Extra.(*Struct).fields - case TINTER: - CalcSize(t) - return &t.Extra.(*Interface).Fields - } - base.Fatalf("Fields: type %v does not have fields", t) - return nil +// SetAllMethods sets the set of all methods (including embedding) for type t. +// Use this method instead of t.AllMethods().Set(), which might call CalcSize() on +// an uninitialized interface type. +func (t *Type) SetAllMethods(fs []*Field) { + t.allMethods.Set(fs) } -// Field returns the i'th field/method of struct/interface type t. +// Fields returns the fields of struct type t. +func (t *Type) Fields() *Fields { + t.wantEtype(TSTRUCT) + return &t.Extra.(*Struct).fields +} + +// Field returns the i'th field of struct type t. func (t *Type) Field(i int) *Field { return t.Fields().Slice()[i] } -// FieldSlice returns a slice of containing all fields/methods of -// struct/interface type t. +// FieldSlice returns a slice of containing all fields of +// a struct type t. func (t *Type) FieldSlice() []*Field { return t.Fields().Slice() } -// SetFields sets struct/interface type t's fields/methods to fields. +// SetFields sets struct type t's fields to fields. func (t *Type) SetFields(fields []*Field) { // If we've calculated the width of t before, // then some other type such as a function signature @@ -946,6 +998,7 @@ func (t *Type) SetFields(fields []*Field) { t.Fields().Set(fields) } +// SetInterface sets the base methods of an interface type t. func (t *Type) SetInterface(methods []*Field) { t.wantEtype(TINTER) t.Methods().Set(methods) @@ -1196,8 +1249,8 @@ func (t *Type) cmp(x *Type) Cmp { return CMPeq case TINTER: - tfs := t.FieldSlice() - xfs := x.FieldSlice() + tfs := t.AllMethods().Slice() + xfs := x.AllMethods().Slice() for i := 0; i < len(tfs) && i < len(xfs); i++ { t1, x1 := tfs[i], xfs[i] if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq { @@ -1385,7 +1438,7 @@ func (t *Type) IsInterface() bool { // IsEmptyInterface reports whether t is an empty interface type. func (t *Type) IsEmptyInterface() bool { - return t.IsInterface() && t.NumFields() == 0 + return t.IsInterface() && t.AllMethods().Len() == 0 } // IsScalar reports whether 't' is a scalar Go type, e.g. @@ -1407,6 +1460,9 @@ func (t *Type) PtrTo() *Type { } func (t *Type) NumFields() int { + if t.kind == TRESULTS { + return len(t.Extra.(*Results).Types) + } return t.Fields().Len() } func (t *Type) FieldType(i int) *Type { @@ -1597,14 +1653,19 @@ func FakeRecvType() *Type { var ( // TSSA types. HasPointers assumes these are pointer-free. - TypeInvalid = newSSA("invalid") - TypeMem = newSSA("mem") - TypeFlags = newSSA("flags") - TypeVoid = newSSA("void") - TypeInt128 = newSSA("int128") + TypeInvalid = newSSA("invalid") + TypeMem = newSSA("mem") + TypeFlags = newSSA("flags") + TypeVoid = newSSA("void") + TypeInt128 = newSSA("int128") + TypeResultMem = newResults([]*Type{TypeMem}) ) -// NewNamed returns a new named type for the given type name. +// NewNamed returns a new named type for the given type name. obj should be an +// ir.Name. The new type is incomplete (marked as TFORW kind), and the underlying +// type should be set later via SetUnderlying(). 
References to the type are +// maintained until the type is filled in, so those references can be updated when +// the type is complete. func NewNamed(obj Object) *Type { t := New(TFORW) t.sym = obj.Sym() @@ -1612,7 +1673,7 @@ func NewNamed(obj Object) *Type { return t } -// Obj returns the type name for the named type t. +// Obj returns the canonical type name node for a named type t, nil for an unnamed type. func (t *Type) Obj() Object { if t.sym != nil { return t.nod @@ -1620,7 +1681,8 @@ func (t *Type) Obj() Object { return nil } -// SetUnderlying sets the underlying type. +// SetUnderlying sets the underlying type. SetUnderlying automatically updates any +// types that were waiting for this type to be completed. func (t *Type) SetUnderlying(underlying *Type) { if underlying.kind == TFORW { // This type isn't computed yet; when it is, update n. @@ -1690,6 +1752,13 @@ func NewBasic(kind Kind, obj Object) *Type { func NewInterface(pkg *Pkg, methods []*Field) *Type { t := New(TINTER) t.SetInterface(methods) + for _, f := range methods { + // f.Type could be nil for a broken interface declaration + if f.Type != nil && f.Type.HasTParam() { + t.SetHasTParam(true) + break + } + } if anyBroke(methods) { t.SetBroke(true) } @@ -1697,19 +1766,24 @@ func NewInterface(pkg *Pkg, methods []*Field) *Type { return t } -// NewTypeParam returns a new type param with the given constraint (which may -// not really be needed except for the type checker). -func NewTypeParam(pkg *Pkg, constraint *Type) *Type { +// NewTypeParam returns a new type param. +func NewTypeParam(pkg *Pkg) *Type { t := New(TTYPEPARAM) - constraint.wantEtype(TINTER) - t.methods = constraint.methods t.Extra.(*Interface).pkg = pkg t.SetHasTParam(true) return t } +const BOGUS_FUNARG_OFFSET = -1000000000 + +func unzeroFieldOffsets(f []*Field) { + for i := range f { + f[i].Offset = BOGUS_FUNARG_OFFSET // This will cause an explosion if it is not corrected + } +} + // NewSignature returns a new function type for the given receiver, -// parametes, results, and type parameters, any of which may be nil. +// parameters, results, and type parameters, any of which may be nil. func NewSignature(pkg *Pkg, recv *Field, tparams, params, results []*Field) *Type { var recvs []*Field if recv != nil { @@ -1728,7 +1802,13 @@ func NewSignature(pkg *Pkg, recv *Field, tparams, params, results []*Field) *Typ return s } + if recv != nil { + recv.Offset = BOGUS_FUNARG_OFFSET + } + unzeroFieldOffsets(params) + unzeroFieldOffsets(results) ft.Receiver = funargs(recvs, FunargRcvr) + // TODO(danscales): just use nil here (save memory) if no tparams ft.TParams = funargs(tparams, FunargTparams) ft.Params = funargs(params, FunargParams) ft.Results = funargs(results, FunargResults) diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go index 30f0430ff18..2939dcc0bdf 100644 --- a/src/cmd/compile/internal/types2/api.go +++ b/src/cmd/compile/internal/types2/api.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -23,8 +22,6 @@ // and checks for compliance with the language specification. // Use Info.Types[expr].Type for the results of type inference. // -// For a tutorial, see https://golang.org/s/types-tutorial. -// package types2 import ( @@ -110,13 +107,6 @@ type Config struct { // type-checked. IgnoreFuncBodies bool - // If AcceptMethodTypeParams is set, methods may have type parameters. 
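BOGUS_FUNARG_OFFSET, introduced above, is a poison value: receiver, parameter, and result offsets are no longer meaningful in package types (they depend on the ABI), so NewSignature stamps them with a sentinel chosen to make any accidental use blow up loudly rather than yield a plausible-looking address. A sketch of the technique with hypothetical names:

package main

import "fmt"

// bogusOffset mirrors BOGUS_FUNARG_OFFSET: any address computed from an
// offset never overwritten by the ABI layer becomes obviously wrong.
const bogusOffset = -1000000000

type field struct{ offset int64 }

func poison(fs []field) {
    for i := range fs {
        fs[i].offset = bogusOffset
    }
}

func addr(base int64, f field) int64 {
    if f.offset == bogusOffset {
        panic("field offset used before ABI assignment")
    }
    return base + f.offset
}

func main() {
    fs := []field{{}, {}}
    poison(fs)
    fs[1].offset = 8 // the ABI layer fills this in later
    fmt.Println(addr(100, fs[1])) // 108
    // addr(100, fs[0]) would panic: its offset was never assigned.
}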
- AcceptMethodTypeParams bool - - // If InferFromConstraints is set, constraint type inference is used - // if some function type arguments are missing. - InferFromConstraints bool - // If FakeImportC is set, `import "C"` (for packages requiring Cgo) // declares an empty "C" package and errors are omitted for qualified // identifiers referring to package C (which won't find an object). @@ -408,14 +398,15 @@ func (conf *Config) Check(path string, files []*syntax.File, info *Info) (*Packa // AssertableTo reports whether a value of type V can be asserted to have type T. func AssertableTo(V *Interface, T Type) bool { - m, _ := (*Checker)(nil).assertableTo(V, T, false) + m, _ := (*Checker)(nil).assertableTo(V, T) return m == nil } // AssignableTo reports whether a value of type V is assignable to a variable of type T. func AssignableTo(V, T Type) bool { x := operand{mode: value, typ: V} - return x.assignableTo(nil, T, nil) // check not needed for non-constant x + ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x + return ok } // ConvertibleTo reports whether a value of type V is convertible to a value of type T. diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 9d23b5b2a6c..873390c1e92 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -22,9 +21,21 @@ func unimplemented() { panic("unimplemented") } +// genericPkg is a source prefix for packages that contain generic code. +const genericPkg = "package generic_" + +// brokenPkg is a source prefix for packages that are not expected to parse +// or type-check cleanly. They are always parsed assuming that they contain +// generic code. +const brokenPkg = "package broken_" + func parseSrc(path, src string) (*syntax.File, error) { + var mode syntax.Mode + if strings.HasPrefix(src, genericPkg) || strings.HasPrefix(src, brokenPkg) { + mode = syntax.AllowGenerics + } errh := func(error) {} // dummy error handler so that parsing continues in presence of errors - return syntax.Parse(syntax.NewFileBase(path), strings.NewReader(src), errh, nil, syntax.AllowGenerics) + return syntax.Parse(syntax.NewFileBase(path), strings.NewReader(src), errh, nil, mode) } func pkgFor(path, source string, info *Info) (*Package, error) { @@ -54,10 +65,8 @@ func mayTypecheck(t *testing.T, path, source string, info *Info) (string, error) t.Fatalf("%s: unable to parse: %s", path, err) } conf := Config{ - AcceptMethodTypeParams: true, - InferFromConstraints: true, - Error: func(err error) {}, - Importer: defaultImporter(), + Error: func(err error) {}, + Importer: defaultImporter(), } pkg, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, info) return pkg.Name(), err @@ -182,9 +191,6 @@ func TestValuesInfo(t *testing.T) { } func TestTypesInfo(t *testing.T) { - // Test sources that are not expected to typecheck must start with the broken prefix. 
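The genericPkg/brokenPkg prefixes introduced above let one test table mix generic and non-generic sources: parseSrc inspects the package-name prefix and enables syntax.AllowGenerics only for sources that opt in, so the remaining tests keep exercising the default parser path. The selection logic, reduced to a runnable sketch:

package main

import (
    "fmt"
    "strings"
)

const (
    genericPkg = "package generic_"
    brokenPkg  = "package broken_"
)

// modeFor mirrors parseSrc: only sources that opt in via their package
// name are parsed with generics enabled.
func modeFor(src string) string {
    if strings.HasPrefix(src, genericPkg) || strings.HasPrefix(src, brokenPkg) {
        return "AllowGenerics"
    }
    return "default"
}

func main() {
    fmt.Println(modeFor("package generic_p0; func f[T any](T)")) // AllowGenerics
    fmt.Println(modeFor("package p4; func _() { panic(`x`) }"))  // default
}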
- const broken = "package broken_" - var tests = []struct { src string expr string // expression @@ -315,34 +321,38 @@ func TestTypesInfo(t *testing.T) { }, // tests for broken code that doesn't parse or type-check - {broken + `x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`}, - {broken + `x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`}, - {broken + `x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a, f: b,}}`, `b`, `string`}, - {broken + `x3; var x = panic("");`, `panic`, `func(interface{})`}, + {brokenPkg + `x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`}, + {brokenPkg + `x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`}, + {brokenPkg + `x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a, f: b,}}`, `b`, `string`}, + {brokenPkg + `x3; var x = panic("");`, `panic`, `func(interface{})`}, {`package x4; func _() { panic("") }`, `panic`, `func(interface{})`}, - {broken + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string][-1]int`}, + {brokenPkg + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string]invalid type`}, // parameterized functions - {`package p0; func f[T any](T); var _ = f[int]`, `f`, `func[T₁ interface{}](T₁)`}, - {`package p1; func f[T any](T); var _ = f[int]`, `f[int]`, `func(int)`}, - {`package p2; func f[T any](T); func _() { f(42) }`, `f`, `func[T₁ interface{}](T₁)`}, - {`package p3; func f[T any](T); func _() { f(42) }`, `f(42)`, `()`}, + {genericPkg + `p0; func f[T any](T); var _ = f[int]`, `f`, `func[T₁ interface{}](T₁)`}, + {genericPkg + `p1; func f[T any](T); var _ = f[int]`, `f[int]`, `func(int)`}, + {genericPkg + `p2; func f[T any](T); func _() { f(42) }`, `f`, `func[T₁ interface{}](T₁)`}, + {genericPkg + `p3; func f[T any](T); func _() { f(42) }`, `f(42)`, `()`}, // type parameters - {`package t0; type t[] int; var _ t`, `t`, `t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t - {`package t1; type t[P any] int; var _ t[int]`, `t`, `t1.t[P₁ interface{}]`}, - {`package t2; type t[P interface{}] int; var _ t[int]`, `t`, `t2.t[P₁ interface{}]`}, - {`package t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `t3.t[P₁, Q₂ interface{}]`}, - {broken + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[P₁, Q₂ interface{m()}]`}, + {genericPkg + `t0; type t[] int; var _ t`, `t`, `generic_t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t + {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[P₁ interface{}]`}, + {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[P₁ interface{}]`}, + {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[P₁, Q₂ interface{}]`}, + {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[P₁, Q₂ interface{m()}]`}, // instantiated types must be sanitized - {`package g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `g0.t[int]`}, + {genericPkg + `g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `generic_g0.t[int]`}, + + // issue 45096 + {genericPkg + `issue45096; func _[T interface{ type int8, int16, int32 }](x T) { _ = x < 0 }`, `0`, `T₁`}, } for _, test := range tests { + ResetId() // avoid renumbering of type parameter ids when adding tests info := Info{Types: 
make(map[syntax.Expr]TypeAndValue)} var name string - if strings.HasPrefix(test.src, broken) { + if strings.HasPrefix(test.src, brokenPkg) { var err error name, err = mayTypecheck(t, "TypesInfo", test.src, &info) if err == nil { @@ -380,103 +390,103 @@ func TestInferredInfo(t *testing.T) { targs []string sig string }{ - {`package p0; func f[T any](T); func _() { f(42) }`, + {genericPkg + `p0; func f[T any](T); func _() { f(42) }`, `f`, []string{`int`}, `func(int)`, }, - {`package p1; func f[T any](T) T; func _() { f('@') }`, + {genericPkg + `p1; func f[T any](T) T; func _() { f('@') }`, `f`, []string{`rune`}, `func(rune) rune`, }, - {`package p2; func f[T any](...T) T; func _() { f(0i) }`, + {genericPkg + `p2; func f[T any](...T) T; func _() { f(0i) }`, `f`, []string{`complex128`}, `func(...complex128) complex128`, }, - {`package p3; func f[A, B, C any](A, *B, []C); func _() { f(1.2, new(string), []byte{}) }`, + {genericPkg + `p3; func f[A, B, C any](A, *B, []C); func _() { f(1.2, new(string), []byte{}) }`, `f`, []string{`float64`, `string`, `byte`}, `func(float64, *string, []byte)`, }, - {`package p4; func f[A, B any](A, *B, ...[]B); func _() { f(1.2, new(byte)) }`, + {genericPkg + `p4; func f[A, B any](A, *B, ...[]B); func _() { f(1.2, new(byte)) }`, `f`, []string{`float64`, `byte`}, `func(float64, *byte, ...[]byte)`, }, // we don't know how to translate these but we can type-check them - {`package q0; type T struct{}; func (T) m[P any](P); func _(x T) { x.m(42) }`, + {genericPkg + `q0; type T struct{}; func (T) m[P any](P); func _(x T) { x.m(42) }`, `x.m`, []string{`int`}, `func(int)`, }, - {`package q1; type T struct{}; func (T) m[P any](P) P; func _(x T) { x.m(42) }`, + {genericPkg + `q1; type T struct{}; func (T) m[P any](P) P; func _(x T) { x.m(42) }`, `x.m`, []string{`int`}, `func(int) int`, }, - {`package q2; type T struct{}; func (T) m[P any](...P) P; func _(x T) { x.m(42) }`, + {genericPkg + `q2; type T struct{}; func (T) m[P any](...P) P; func _(x T) { x.m(42) }`, `x.m`, []string{`int`}, `func(...int) int`, }, - {`package q3; type T struct{}; func (T) m[A, B, C any](A, *B, []C); func _(x T) { x.m(1.2, new(string), []byte{}) }`, + {genericPkg + `q3; type T struct{}; func (T) m[A, B, C any](A, *B, []C); func _(x T) { x.m(1.2, new(string), []byte{}) }`, `x.m`, []string{`float64`, `string`, `byte`}, `func(float64, *string, []byte)`, }, - {`package q4; type T struct{}; func (T) m[A, B any](A, *B, ...[]B); func _(x T) { x.m(1.2, new(byte)) }`, + {genericPkg + `q4; type T struct{}; func (T) m[A, B any](A, *B, ...[]B); func _(x T) { x.m(1.2, new(byte)) }`, `x.m`, []string{`float64`, `byte`}, `func(float64, *byte, ...[]byte)`, }, - {`package r0; type T[P any] struct{}; func (_ T[P]) m[Q any](Q); func _[P any](x T[P]) { x.m(42) }`, + {genericPkg + `r0; type T[P any] struct{}; func (_ T[P]) m[Q any](Q); func _[P any](x T[P]) { x.m(42) }`, `x.m`, []string{`int`}, `func(int)`, }, // TODO(gri) record method type parameters in syntax.FuncType so we can check this - // {`package r1; type T interface{ m[P any](P) }; func _(x T) { x.m(4.2) }`, + // {genericPkg + `r1; type T interface{ m[P any](P) }; func _(x T) { x.m(4.2) }`, // `x.m`, // []string{`float64`}, // `func(float64)`, // }, - {`package s1; func f[T any, P interface{type *T}](x T); func _(x string) { f(x) }`, + {genericPkg + `s1; func f[T any, P interface{type *T}](x T); func _(x string) { f(x) }`, `f`, []string{`string`, `*string`}, `func(x string)`, }, - {`package s2; func f[T any, P interface{type *T}](x []T); func _(x []int) { f(x) 
}`, + {genericPkg + `s2; func f[T any, P interface{type *T}](x []T); func _(x []int) { f(x) }`, `f`, []string{`int`, `*int`}, `func(x []int)`, }, - {`package s3; type C[T any] interface{type chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`, + {genericPkg + `s3; type C[T any] interface{type chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`, `f`, []string{`int`, `chan<- int`}, `func(x []int)`, }, - {`package s4; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`, + {genericPkg + `s4; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`, `f`, []string{`int`, `chan<- int`, `chan<- []*chan<- int`}, `func(x []int)`, }, - {`package t1; func f[T any, P interface{type *T}]() T; func _() { _ = f[string] }`, + {genericPkg + `t1; func f[T any, P interface{type *T}]() T; func _() { _ = f[string] }`, `f`, []string{`string`, `*string`}, `func() string`, }, - {`package t2; type C[T any] interface{type chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`, + {genericPkg + `t2; type C[T any] interface{type chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`, `f`, []string{`int`, `chan<- int`}, `func() []int`, }, - {`package t3; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`, + {genericPkg + `t3; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`, `f`, []string{`int`, `chan<- int`, `chan<- []*chan<- int`}, `func() []int`, @@ -545,13 +555,14 @@ func TestDefsInfo(t *testing.T) { {`package p2; var x int`, `x`, `var p2.x int`}, {`package p3; type x int`, `x`, `type p3.x int`}, {`package p4; func f()`, `f`, `func p4.f()`}, + {`package p5; func f() int { x, _ := 1, 2; return x }`, `_`, `var _ int`}, // generic types must be sanitized // (need to use sufficiently nested types to provoke unexpanded types) - {`package g0; type t[P any] P; const x = t[int](42)`, `x`, `const g0.x g0.t[int]`}, - {`package g1; type t[P any] P; var x = t[int](42)`, `x`, `var g1.x g1.t[int]`}, - {`package g2; type t[P any] P; type x struct{ f t[int] }`, `x`, `type g2.x struct{f g2.t[int]}`}, - {`package g3; type t[P any] P; func f(x struct{ f t[string] }); var g = f`, `g`, `var g3.g func(x struct{f g3.t[string]})`}, + {genericPkg + `g0; type t[P any] P; const x = t[int](42)`, `x`, `const generic_g0.x generic_g0.t[int]`}, + {genericPkg + `g1; type t[P any] P; var x = t[int](42)`, `x`, `var generic_g1.x generic_g1.t[int]`}, + {genericPkg + `g2; type t[P any] P; type x struct{ f t[int] }`, `x`, `type generic_g2.x struct{f generic_g2.t[int]}`}, + {genericPkg + `g3; type t[P any] P; func f(x struct{ f t[string] }); var g = f`, `g`, `var generic_g3.g func(x struct{f generic_g3.t[string]})`}, } for _, test := range tests { @@ -593,10 +604,10 @@ func TestUsesInfo(t *testing.T) { // generic types must be sanitized // (need to use sufficiently nested types to provoke unexpanded types) - {`package g0; func _() { _ = x }; type t[P any] P; const x = t[int](42)`, `x`, `const g0.x g0.t[int]`}, - {`package g1; func _() { _ = x }; type t[P any] P; var x = t[int](42)`, `x`, `var g1.x g1.t[int]`}, - {`package g2; func _() { type _ x }; type t[P any] P; type x struct{ f t[int] }`, `x`, `type g2.x struct{f g2.t[int]}`}, - {`package g3; func _() { _ = f }; type t[P any] P; func f(x struct{ f t[string] })`, `f`, `func g3.f(x struct{f g3.t[string]})`}, + {genericPkg + `g0; func _() { _ = 
x }; type t[P any] P; const x = t[int](42)`, `x`, `const generic_g0.x generic_g0.t[int]`}, + {genericPkg + `g1; func _() { _ = x }; type t[P any] P; var x = t[int](42)`, `x`, `var generic_g1.x generic_g1.t[int]`}, + {genericPkg + `g2; func _() { type _ x }; type t[P any] P; type x struct{ f t[int] }`, `x`, `type generic_g2.x struct{f generic_g2.t[int]}`}, + {genericPkg + `g3; func _() { _ = f }; type t[P any] P; func f(x struct{ f t[string] })`, `f`, `func generic_g3.f(x struct{f generic_g3.t[string]})`}, } for _, test := range tests { @@ -1557,6 +1568,53 @@ func F(){ } } +func TestConvertibleTo(t *testing.T) { + for _, test := range []struct { + v, t Type + want bool + }{ + {Typ[Int], Typ[Int], true}, + {Typ[Int], Typ[Float32], true}, + {newDefined(Typ[Int]), Typ[Int], true}, + {newDefined(new(Struct)), new(Struct), true}, + {newDefined(Typ[Int]), new(Struct), false}, + {Typ[UntypedInt], Typ[Int], true}, + {NewSlice(Typ[Int]), NewPointer(NewArray(Typ[Int], 10)), true}, + {NewSlice(Typ[Int]), NewArray(Typ[Int], 10), false}, + {NewSlice(Typ[Int]), NewPointer(NewArray(Typ[Uint], 10)), false}, + // Untyped string values are not permitted by the spec, so the below + // behavior is undefined. + {Typ[UntypedString], Typ[String], true}, + } { + if got := ConvertibleTo(test.v, test.t); got != test.want { + t.Errorf("ConvertibleTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want) + } + } +} + +func TestAssignableTo(t *testing.T) { + for _, test := range []struct { + v, t Type + want bool + }{ + {Typ[Int], Typ[Int], true}, + {Typ[Int], Typ[Float32], false}, + {newDefined(Typ[Int]), Typ[Int], false}, + {newDefined(new(Struct)), new(Struct), true}, + {Typ[UntypedBool], Typ[Bool], true}, + {Typ[UntypedString], Typ[Bool], false}, + // Neither untyped string nor untyped numeric assignments arise during + // normal type checking, so the below behavior is technically undefined by + // the spec. + {Typ[UntypedString], Typ[String], true}, + {Typ[UntypedInt], Typ[Int], true}, + } { + if got := AssignableTo(test.v, test.t); got != test.want { + t.Errorf("AssignableTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want) + } + } +} + func TestIdentical_issue15173(t *testing.T) { // Identical should allow nil arguments and be symmetric. for _, test := range []struct { diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go index 00495f39761..583118c8b23 100644 --- a/src/cmd/compile/internal/types2/assignments.go +++ b/src/cmd/compile/internal/types2/assignments.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -33,8 +32,8 @@ func (check *Checker) assignment(x *operand, T Type, context string) { // spec: "If an untyped constant is assigned to a variable of interface // type or the blank identifier, the constant is first converted to type // bool, rune, int, float64, complex128 or string respectively, depending - // on whether the value is a boolean, rune, integer, floating-point, complex, - // or string constant." + // on whether the value is a boolean, rune, integer, floating-point, + // complex, or string constant." 
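TestConvertibleTo and TestAssignableTo above pin down the exported predicates, including the new slice-to-array-pointer convertibility. The exported go/types package mirrors this types2 API, so (assuming a Go 1.17+ toolchain) the same cases can be checked from ordinary code:

package main

import (
    "fmt"
    "go/types"
)

func main() {
    intT := types.Typ[types.Int]
    slice := types.NewSlice(intT)
    arrPtr := types.NewPointer(types.NewArray(intT, 10))

    fmt.Println(types.ConvertibleTo(slice, arrPtr)) // true as of Go 1.17
    fmt.Println(types.AssignableTo(slice, arrPtr))  // false: conversion only
}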
if x.isNil() { if T == nil { check.errorf(x, "use of untyped nil in %s", context) @@ -44,10 +43,27 @@ func (check *Checker) assignment(x *operand, T Type, context string) { } else if T == nil || IsInterface(T) { target = Default(x.typ) } - check.convertUntyped(x, target) - if x.mode == invalid { + newType, val, code := check.implicitTypeAndValue(x, target) + if code != 0 { + msg := check.sprintf("cannot use %s as %s value in %s", x, target, context) + switch code { + case _TruncatedFloat: + msg += " (truncated)" + case _NumericOverflow: + msg += " (overflows)" + } + check.error(x, msg) + x.mode = invalid return } + if val != nil { + x.val = val + check.updateExprVal(x.expr, val) + } + if newType != x.typ { + x.typ = newType + check.updateExprType(x.expr, newType, false) + } } // x.typ is typed @@ -63,7 +79,8 @@ func (check *Checker) assignment(x *operand, T Type, context string) { return } - if reason := ""; !x.assignableTo(check, T, &reason) { + reason := "" + if ok, _ := x.assignableTo(check, T, &reason); !ok { if check.conf.CompilerErrorMessages { check.errorf(x, "incompatible type: cannot use %s as %s value", x, T) } else { @@ -113,6 +130,8 @@ func (check *Checker) initVar(lhs *Var, x *operand, context string) Type { if lhs.typ == nil { lhs.typ = Typ[Invalid] } + // Note: This was reverted in go/types (https://golang.org/cl/292751). + // TODO(gri): decide what to do (also affects test/run.go exclusion list) lhs.used = true // avoid follow-on "declared but not used" errors return nil } @@ -194,7 +213,7 @@ func (check *Checker) assignVar(lhs syntax.Expr, x *operand) Type { case variable, mapindex: // ok case nilvalue: - check.errorf(&z, "cannot assign to nil") // default would print "untyped nil" + check.error(&z, "cannot assign to nil") // default would print "untyped nil" return nil default: if sel, ok := z.expr.(*syntax.SelectorExpr); ok { @@ -311,40 +330,59 @@ func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) { scope := check.scope // collect lhs variables - var newVars []*Var - var lhsVars = make([]*Var, len(lhs)) + seen := make(map[string]bool, len(lhs)) + lhsVars := make([]*Var, len(lhs)) + newVars := make([]*Var, 0, len(lhs)) + hasErr := false for i, lhs := range lhs { - var obj *Var - if ident, _ := lhs.(*syntax.Name); ident != nil { - // Use the correct obj if the ident is redeclared. The - // variable's scope starts after the declaration; so we - // must use Scope.Lookup here and call Scope.Insert - // (via check.declare) later. - name := ident.Value - if alt := scope.Lookup(name); alt != nil { - // redeclared object must be a variable - if alt, _ := alt.(*Var); alt != nil { - obj = alt - } else { - check.errorf(lhs, "cannot assign to %s", lhs) - } - check.recordUse(ident, alt) - } else { - // declare new variable, possibly a blank (_) variable - obj = NewVar(ident.Pos(), check.pkg, name, nil) - if name != "_" { - newVars = append(newVars, obj) - } - check.recordDef(ident, obj) - } - } else { + ident, _ := lhs.(*syntax.Name) + if ident == nil { check.useLHS(lhs) - check.errorf(lhs, "cannot declare %s", lhs) + check.errorf(lhs, "non-name %s on left side of :=", lhs) + hasErr = true + continue } - if obj == nil { - obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable + + name := ident.Value + if name != "_" { + if seen[name] { + check.errorf(lhs, "%s repeated on left side of :=", lhs) + hasErr = true + continue + } + seen[name] = true } + + // Use the correct obj if the ident is redeclared. 
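The assignment change above inlines what convertUntyped used to do: implicitTypeAndValue reports an error code, and the caller appends "(truncated)" or "(overflows)" to the message. At the source level these codes correspond to familiar compile errors; the exact wording below is paraphrased, and the failing lines are commented out so the sketch builds:

package main

func main() {
    // What the two codes report, roughly:
    //
    // var i int32 = 1.5 // cannot use 1.5 ... as int32 value ... (truncated)
    // var b byte = 300  // cannot use 300 ... as byte value ... (overflows)

    var f float32 = 1.5 // fine: exactly representable
    _ = f
}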
The + // variable's scope starts after the declaration; so we + // must use Scope.Lookup here and call Scope.Insert + // (via check.declare) later. + if alt := scope.Lookup(name); alt != nil { + check.recordUse(ident, alt) + // redeclared object must be a variable + if obj, _ := alt.(*Var); obj != nil { + lhsVars[i] = obj + } else { + check.errorf(lhs, "cannot assign to %s", lhs) + hasErr = true + } + continue + } + + // declare new variable + obj := NewVar(ident.Pos(), check.pkg, name, nil) lhsVars[i] = obj + if name != "_" { + newVars = append(newVars, obj) + } + check.recordDef(ident, obj) + } + + // create dummy variables where the lhs is invalid + for i, obj := range lhsVars { + if obj == nil { + lhsVars[i] = NewVar(lhs[i].Pos(), check.pkg, "_", nil) + } } check.initVars(lhsVars, rhs, nopos) @@ -352,17 +390,18 @@ func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) { // process function literals in rhs expressions before scope changes check.processDelayed(top) - // declare new variables - if len(newVars) > 0 { - // spec: "The scope of a constant or variable identifier declared inside - // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl - // for short variable declarations) and ends at the end of the innermost - // containing block." - scopePos := endPos(rhs[len(rhs)-1]) - for _, obj := range newVars { - check.declare(scope, nil, obj, scopePos) // recordObject already called - } - } else { + if len(newVars) == 0 && !hasErr { check.softErrorf(pos, "no new variables on left side of :=") + return + } + + // declare new variables + // spec: "The scope of a constant or variable identifier declared inside + // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl + // for short variable declarations) and ends at the end of the innermost + // containing block." + scopePos := syntax.EndPos(rhs[len(rhs)-1]) + for _, obj := range newVars { + check.declare(scope, nil, obj, scopePos) // id = nil: recordDef already called } } diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go index a6a9b51dd11..b9e178dd576 100644 --- a/src/cmd/compile/internal/types2/builtins.go +++ b/src/cmd/compile/internal/types2/builtins.go @@ -21,8 +21,8 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // append is the only built-in that permits the use of ... for the last argument bin := predeclaredFuncs[id] if call.HasDots && id != _Append { - //check.invalidOpf(call.Ellipsis, "invalid use of ... with built-in %s", bin.name) - check.invalidOpf(call, "invalid use of ... with built-in %s", bin.name) + //check.errorf(call.Ellipsis, invalidOp + "invalid use of ... with built-in %s", bin.name) + check.errorf(call, invalidOp+"invalid use of ... with built-in %s", bin.name) check.use(call.ArgList...) 
return } @@ -68,7 +68,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( msg = "too many" } if msg != "" { - check.invalidOpf(call, "%s arguments for %s (expected %d, found %d)", msg, call, bin.nargs, nargs) + check.errorf(call, invalidOp+"%s arguments for %v (expected %d, found %d)", msg, call, bin.nargs, nargs) return } } @@ -85,7 +85,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( if s := asSlice(S); s != nil { T = s.elem } else { - check.invalidArgf(x, "%s is not a slice", x) + check.errorf(x, invalidArg+"%s is not a slice", x) return } @@ -95,23 +95,25 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // spec: "As a special case, append also accepts a first argument assignable // to type []byte with a second argument of string type followed by ... . // This form appends the bytes of the string. - if nargs == 2 && call.HasDots && x.assignableTo(check, NewSlice(universeByte), nil) { - arg(x, 1) - if x.mode == invalid { - return - } - if isString(x.typ) { - if check.Types != nil { - sig := makeSig(S, S, x.typ) - sig.variadic = true - check.recordBuiltinType(call.Fun, sig) + if nargs == 2 && call.HasDots { + if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { + arg(x, 1) + if x.mode == invalid { + return } - x.mode = value - x.typ = S - break + if isString(x.typ) { + if check.Types != nil { + sig := makeSig(S, S, x.typ) + sig.variadic = true + check.recordBuiltinType(call.Fun, sig) + } + x.mode = value + x.typ = S + break + } + alist = append(alist, *x) + // fallthrough } - alist = append(alist, *x) - // fallthrough } // check general case by creating custom signature @@ -127,7 +129,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( arg(&x, i) xlist = append(xlist, &x) } - check.arguments(call, sig, xlist) // discard result (we know the result type) + check.arguments(call, sig, nil, xlist) // discard result (we know the result type) // ok to continue even if check.arguments reported errors x.mode = value @@ -197,7 +199,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( } if mode == invalid && typ != Typ[Invalid] { - check.invalidArgf(x, "%s for %s", x, bin.name) + check.errorf(x, invalidArg+"%s for %s", x, bin.name) return } @@ -212,11 +214,11 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // close(c) c := asChan(x.typ) if c == nil { - check.invalidArgf(x, "%s is not a channel", x) + check.errorf(x, invalidArg+"%s is not a channel", x) return } if c.dir == RecvOnly { - check.invalidArgf(x, "%s must not be a receive-only channel", x) + check.errorf(x, invalidArg+"%s must not be a receive-only channel", x) return } @@ -280,7 +282,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // both argument types must be identical if !check.identical(x.typ, y.typ) { - check.invalidOpf(x, "%s (mismatched types %s and %s)", call, x.typ, y.typ) + check.errorf(x, invalidOp+"%v (mismatched types %s and %s)", call, x.typ, y.typ) return } @@ -300,7 +302,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( } resTyp := check.applyTypeFunc(f, x.typ) if resTyp == nil { - check.invalidArgf(x, "arguments have type %s, expected floating-point", x.typ) + check.errorf(x, invalidArg+"arguments have type %s, expected floating-point", x.typ) return } @@ -340,12 +342,12 @@ func (check *Checker) builtin(x *operand, call 
*syntax.CallExpr, id builtinId) ( } if dst == nil || src == nil { - check.invalidArgf(x, "copy expects slice arguments; found %s and %s", x, &y) + check.errorf(x, invalidArg+"copy expects slice arguments; found %s and %s", x, &y) return } if !check.identical(dst, src) { - check.invalidArgf(x, "arguments to copy %s and %s have different element types %s and %s", x, &y, dst, src) + check.errorf(x, invalidArg+"arguments to copy %s and %s have different element types %s and %s", x, &y, dst, src) return } @@ -359,7 +361,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // delete(m, k) m := asMap(x.typ) if m == nil { - check.invalidArgf(x, "%s is not a map", x) + check.errorf(x, invalidArg+"%s is not a map", x) return } arg(x, 1) // k @@ -418,7 +420,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( } resTyp := check.applyTypeFunc(f, x.typ) if resTyp == nil { - check.invalidArgf(x, "argument has type %s, expected complex type", x.typ) + check.errorf(x, invalidArg+"argument has type %s, expected complex type", x.typ) return } @@ -473,7 +475,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( } if !valid(T) { - check.invalidArgf(arg0, "cannot make %s; type must be slice, map, or channel", arg0) + check.errorf(arg0, invalidArg+"cannot make %s; type must be slice, map, or channel", arg0) return } if nargs < min || max < nargs { @@ -495,7 +497,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( } } if len(sizes) == 2 && sizes[0] > sizes[1] { - check.invalidArgf(call.ArgList[1], "length and capacity swapped") + check.error(call.ArgList[1], invalidArg+"length and capacity swapped") // safe to continue } x.mode = value @@ -575,10 +577,29 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( check.recordBuiltinType(call.Fun, makeSig(x.typ)) } + case _Add: + // unsafe.Add(ptr unsafe.Pointer, len IntegerType) unsafe.Pointer + check.assignment(x, Typ[UnsafePointer], "argument to unsafe.Add") + if x.mode == invalid { + return + } + + var y operand + arg(&y, 1) + if !check.isValidIndex(&y, "length", true) { + return + } + + x.mode = value + x.typ = Typ[UnsafePointer] + if check.Types != nil { + check.recordBuiltinType(call.Fun, makeSig(x.typ, x.typ, y.typ)) + } + case _Alignof: // unsafe.Alignof(x T) uintptr if asTypeParam(x.typ) != nil { - check.invalidOpf(call, "unsafe.Alignof undefined for %s", x) + check.errorf(call, invalidOp+"unsafe.Alignof undefined for %s", x) return } check.assignment(x, nil, "argument to unsafe.Alignof") @@ -597,7 +618,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( arg0 := call.ArgList[0] selx, _ := unparen(arg0).(*syntax.SelectorExpr) if selx == nil { - check.invalidArgf(arg0, "%s is not a selector expression", arg0) + check.errorf(arg0, invalidArg+"%s is not a selector expression", arg0) check.use(arg0) return } @@ -612,18 +633,18 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( obj, index, indirect := check.lookupFieldOrMethod(base, false, check.pkg, sel) switch obj.(type) { case nil: - check.invalidArgf(x, "%s has no single field %s", base, sel) + check.errorf(x, invalidArg+"%s has no single field %s", base, sel) return case *Func: // TODO(gri) Using derefStructPtr may result in methods being found // that don't actually exist. An error either way, but the error // message is confusing. 
See: https://play.golang.org/p/al75v23kUy , // but go/types reports: "invalid argument: x.m is a method value". - check.invalidArgf(arg0, "%s is a method value", arg0) + check.errorf(arg0, invalidArg+"%s is a method value", arg0) return } if indirect { - check.invalidArgf(x, "field %s is embedded via a pointer in %s", sel, base) + check.errorf(x, invalidArg+"field %s is embedded via a pointer in %s", sel, base) return } @@ -639,7 +660,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( case _Sizeof: // unsafe.Sizeof(x T) uintptr if asTypeParam(x.typ) != nil { - check.invalidOpf(call, "unsafe.Sizeof undefined for %s", x) + check.errorf(call, invalidOp+"unsafe.Sizeof undefined for %s", x) return } check.assignment(x, nil, "argument to unsafe.Sizeof") @@ -652,12 +673,32 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( x.typ = Typ[Uintptr] // result is constant - no need to record signature + case _Slice: + // unsafe.Slice(ptr *T, len IntegerType) []T + typ := asPointer(x.typ) + if typ == nil { + check.errorf(x, invalidArg+"%s is not a pointer", x) + return + } + + var y operand + arg(&y, 1) + if !check.isValidIndex(&y, "length", false) { + return + } + + x.mode = value + x.typ = NewSlice(typ.base) + if check.Types != nil { + check.recordBuiltinType(call.Fun, makeSig(x.typ, typ, y.typ)) + } + case _Assert: // assert(pred) causes a typechecker error if pred is false. // The result of assert is the value of pred if there is no error. // Note: assert is only available in self-test mode. if x.mode != constant_ || !isBoolean(x.typ) { - check.invalidArgf(x, "%s is not a boolean constant", x) + check.errorf(x, invalidArg+"%s is not a boolean constant", x) return } if x.val.Kind() != constant.Bool { diff --git a/src/cmd/compile/internal/types2/builtins_test.go b/src/cmd/compile/internal/types2/builtins_test.go index 780d0a15a71..82c786b86ea 100644 --- a/src/cmd/compile/internal/types2/builtins_test.go +++ b/src/cmd/compile/internal/types2/builtins_test.go @@ -85,6 +85,9 @@ var builtinCalls = []struct { {"make", `var c int32; _ = make([]float64 , 0, c)`, `func([]float64, int, int32) []float64`}, {"make", `var l, c uint ; _ = make([]complex128, l, c)`, `func([]complex128, uint, uint) []complex128`}, + // issue #45667 + {"make", `const l uint = 1; _ = make([]int, l)`, `func([]int, uint) []int`}, + {"new", `_ = new(int)`, `func(int) *int`}, {"new", `type T struct{}; _ = new(T)`, `func(p.T) *p.T`}, @@ -102,6 +105,10 @@ var builtinCalls = []struct { {"recover", `recover()`, `func() interface{}`}, {"recover", `_ = recover()`, `func() interface{}`}, + {"Add", `var p unsafe.Pointer; _ = unsafe.Add(p, -1.0)`, `func(unsafe.Pointer, int) unsafe.Pointer`}, + {"Add", `var p unsafe.Pointer; var n uintptr; _ = unsafe.Add(p, n)`, `func(unsafe.Pointer, uintptr) unsafe.Pointer`}, + {"Add", `_ = unsafe.Add(nil, 0)`, `func(unsafe.Pointer, int) unsafe.Pointer`}, + {"Alignof", `_ = unsafe.Alignof(0)`, `invalid type`}, // constant {"Alignof", `var x struct{}; _ = unsafe.Alignof(x)`, `invalid type`}, // constant @@ -111,6 +118,9 @@ var builtinCalls = []struct { {"Sizeof", `_ = unsafe.Sizeof(0)`, `invalid type`}, // constant {"Sizeof", `var x struct{}; _ = unsafe.Sizeof(x)`, `invalid type`}, // constant + {"Slice", `var p *int; _ = unsafe.Slice(p, 1)`, `func(*int, int) []int`}, + {"Slice", `var p *byte; var n uintptr; _ = unsafe.Slice(p, n)`, `func(*byte, uintptr) []byte`}, + {"assert", `assert(true)`, `invalid type`}, // constant {"assert", `type B bool; 
const pred B = 1 < 2; assert(pred)`, `invalid type`}, // constant diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go index 72805c453bc..6d149340b28 100644 --- a/src/cmd/compile/internal/types2/call.go +++ b/src/cmd/compile/internal/types2/call.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -13,84 +12,50 @@ import ( "unicode" ) -// funcInst type-checks a function instantiaton inst and returns the result in x. +// funcInst type-checks a function instantiation inst and returns the result in x. // The operand x must be the evaluation of inst.X and its type must be a signature. func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) { - args, ok := check.exprOrTypeList(unpackExpr(inst.Index)) - if !ok { + xlist := unpackExpr(inst.Index) + targs := check.typeList(xlist) + if targs == nil { x.mode = invalid x.expr = inst return } - if len(args) > 0 && args[0].mode != typexpr { - check.errorf(args[0], "%s is not a type", args[0]) - ok = false - } + assert(len(targs) == len(xlist)) - // check number of type arguments - n := len(args) + // check number of type arguments (got) vs number of type parameters (want) sig := x.typ.(*Signature) - if !check.conf.InferFromConstraints && n != len(sig.tparams) || n > len(sig.tparams) { - check.errorf(args[n-1], "got %d type arguments but want %d", n, len(sig.tparams)) + got, want := len(targs), len(sig.tparams) + if !useConstraintTypeInference && got != want || got > want { + check.errorf(xlist[got-1], "got %d type arguments but want %d", got, want) x.mode = invalid x.expr = inst return } - // collect types - targs := make([]Type, n) - poslist := make([]syntax.Pos, n) - for i, a := range args { - if a.mode != typexpr { - // error was reported earlier - x.mode = invalid - x.expr = inst - return - } - targs[i] = a.typ - poslist[i] = a.Pos() - } - - // if we don't have enough type arguments, use constraint type inference - var inferred bool - if n < len(sig.tparams) { - var failed int - targs, failed = check.inferB(sig.tparams, targs) + // if we don't have enough type arguments, try type inference + inferred := false + if got < want { + targs = check.infer(inst.Pos(), sig.tparams, targs, nil, nil, true) if targs == nil { // error was already reported x.mode = invalid x.expr = inst return } - if failed >= 0 { - // at least one type argument couldn't be inferred - assert(targs[failed] == nil) - tpar := sig.tparams[failed] - check.errorf(inst, "cannot infer %s (%s) (%s)", tpar.name, tpar.pos, targs) - x.mode = invalid - x.expr = inst - return - } - // all type arguments were inferred sucessfully - if debug { - for _, targ := range targs { - assert(targ != nil) - } - } - n = len(targs) + got = len(targs) inferred = true } - assert(n == len(sig.tparams)) + assert(got == want) + + // determine argument positions (for error reporting) + poslist := make([]syntax.Pos, len(xlist)) + for i, x := range xlist { + poslist[i] = syntax.StartPos(x) + } // instantiate function signature - for i, typ := range targs { - // some positions may be missing if types are inferred - var pos syntax.Pos - if i < len(poslist) { - pos = poslist[i] - } - check.ordinaryType(pos, typ) - } res := check.instantiate(x.Pos(), sig, targs, poslist).(*Signature) assert(res.tparams == nil) // signature is not generic anymore if inferred { @@ -101,8 +66,21 @@ func (check *Checker) funcInst(x *operand, inst 
*syntax.IndexExpr) { x.expr = inst } -func (check *Checker) call(x *operand, call *syntax.CallExpr) exprKind { - check.exprOrType(x, call.Fun) +func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind { + var inst *syntax.IndexExpr // function instantiation, if any + if iexpr, _ := call.Fun.(*syntax.IndexExpr); iexpr != nil { + if check.indexExpr(x, iexpr) { + // Delay function instantiation to argument checking, + // where we combine type and value arguments for type + // inference. + assert(x.mode == value) + inst = iexpr + } + x.expr = iexpr + check.record(x) + } else { + check.exprOrType(x, call.Fun) + } switch x.mode { case invalid: @@ -151,109 +129,72 @@ func (check *Checker) call(x *operand, call *syntax.CallExpr) exprKind { check.hasCallOrRecv = true } return predeclaredFuncs[id].kind + } - default: - // function/method call - cgocall := x.mode == cgofunc + // ordinary function/method call + cgocall := x.mode == cgofunc - sig := asSignature(x.typ) - if sig == nil { - check.invalidOpf(x, "cannot call non-function %s", x) + sig := asSignature(x.typ) + if sig == nil { + check.errorf(x, invalidOp+"cannot call non-function %s", x) + x.mode = invalid + x.expr = call + return statement + } + + // evaluate type arguments, if any + var targs []Type + if inst != nil { + xlist := unpackExpr(inst.Index) + targs = check.typeList(xlist) + if targs == nil { + check.use(call.ArgList...) x.mode = invalid x.expr = call return statement } + assert(len(targs) == len(xlist)) - // evaluate arguments - args, ok := check.exprOrTypeList(call.ArgList) - if !ok { + // check number of type arguments (got) vs number of type parameters (want) + got, want := len(targs), len(sig.tparams) + if got > want { + check.errorf(xlist[want], "got %d type arguments but want %d", got, want) + check.use(call.ArgList...) x.mode = invalid x.expr = call - return expression + return statement } - - sig = check.arguments(call, sig, args) - - // determine result - switch sig.results.Len() { - case 0: - x.mode = novalue - case 1: - if cgocall { - x.mode = commaerr - } else { - x.mode = value - } - x.typ = sig.results.vars[0].typ // unpack tuple - default: - x.mode = value - x.typ = sig.results - } - x.expr = call - check.hasCallOrRecv = true - - // if type inference failed, a parametrized result must be invalidated - // (operands cannot have a parametrized type) - if x.mode == value && len(sig.tparams) > 0 && isParameterized(sig.tparams, x.typ) { - x.mode = invalid - } - - return statement } -} -// exprOrTypeList returns a list of operands and reports an error if the -// list contains a mix of values and types (ignoring invalid operands). -// TODO(gri) Now we can split this into exprList and typeList. 
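Note: the builtins.go hunks above add type checking for the two new unsafe builtins, unsafe.Add and unsafe.Slice (the _Add and _Slice cases), and builtins_test.go pins down their recorded signatures. As a minimal sketch of the user-level semantics those signatures imply (illustrative code, not part of the patch; requires go1.17):

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		xs := [4]int{10, 20, 30, 40}

		// unsafe.Add(ptr, n) offsets an unsafe.Pointer by n bytes; the
		// second operand is typed like an index (the "len IntegerType"
		// in the signature comments above).
		p := unsafe.Pointer(&xs[0])
		third := (*int)(unsafe.Add(p, 2*unsafe.Sizeof(xs[0])))
		fmt.Println(*third) // 30

		// unsafe.Slice(ptr, n) derives a []int of length and capacity n
		// from a *int; the element type comes from the pointer's base type.
		s := unsafe.Slice(&xs[0], len(xs))
		fmt.Println(s) // [10 20 30 40]
	}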
-func (check *Checker) exprOrTypeList(elist []syntax.Expr) (xlist []*operand, ok bool) { - ok = true + // evaluate arguments + args, _ := check.exprList(call.ArgList, false) + sig = check.arguments(call, sig, targs, args) - switch len(elist) { + // determine result + switch sig.results.Len() { case 0: - // nothing to do - + x.mode = novalue case 1: - // single (possibly comma-ok) value or type, or function returning multiple values - e := elist[0] - var x operand - check.multiExprOrType(&x, e) - if t, ok := x.typ.(*Tuple); ok && x.mode != invalid && x.mode != typexpr { - // multiple values - xlist = make([]*operand, t.Len()) - for i, v := range t.vars { - xlist[i] = &operand{mode: value, expr: e, typ: v.typ} - } - break + if cgocall { + x.mode = commaerr + } else { + x.mode = value } - - check.instantiatedOperand(&x) - - // exactly one (possibly invalid or comma-ok) value or type - xlist = []*operand{&x} - + x.typ = sig.results.vars[0].typ // unpack tuple default: - // multiple (possibly invalid) values or types - xlist = make([]*operand, len(elist)) - ntypes := 0 - for i, e := range elist { - var x operand - check.exprOrType(&x, e) - xlist[i] = &x - switch x.mode { - case invalid: - ntypes = len(xlist) // make 'if' condition fail below (no additional error in this case) - case typexpr: - ntypes++ - check.instantiatedOperand(&x) - } - } - if 0 < ntypes && ntypes < len(xlist) { - check.errorf(xlist[0], "mix of value and type expressions") - ok = false - } + x.mode = value + x.typ = sig.results + } + x.expr = call + check.hasCallOrRecv = true + + // if type inference failed, a parametrized result must be invalidated + // (operands cannot have a parametrized type) + if x.mode == value && len(sig.tparams) > 0 && isParameterized(sig.tparams, x.typ) { + x.mode = invalid } - return + return statement } func (check *Checker) exprList(elist []syntax.Expr, allowCommaOk bool) (xlist []*operand, commaOk bool) { @@ -296,7 +237,7 @@ func (check *Checker) exprList(elist []syntax.Expr, allowCommaOk bool) (xlist [] return } -func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, args []*operand) (rsig *Signature) { +func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []Type, args []*operand) (rsig *Signature) { rsig = sig // TODO(gri) try to eliminate this extra verification loop @@ -324,8 +265,8 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, args []*o ddd := call.HasDots // set up parameters - sig_params := sig.params // adjusted for variadic functions (may be nil for empty parameter lists!) - adjusted := false // indicates if sig_params is different from t.params + sigParams := sig.params // adjusted for variadic functions (may be nil for empty parameter lists!) + adjusted := false // indicates if sigParams is different from t.params if sig.variadic { if ddd { // variadic_func(a, b, c...) @@ -348,7 +289,7 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, args []*o for len(vars) < nargs { vars = append(vars, NewParam(last.pos, last.pkg, last.name, typ)) } - sig_params = NewTuple(vars...) // possibly nil! + sigParams = NewTuple(vars...) // possibly nil! 
adjusted = true npars = nargs } else { @@ -380,35 +321,10 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, args []*o if len(sig.tparams) > 0 { // TODO(gri) provide position information for targs so we can feed // it to the instantiate call for better error reporting - targs, failed := check.infer(sig.tparams, sig_params, args) + targs = check.infer(call.Pos(), sig.tparams, targs, sigParams, args, true) if targs == nil { return // error already reported } - if failed >= 0 { - // Some type arguments couldn't be inferred. Use - // bounds type inference to try to make progress. - if check.conf.InferFromConstraints { - targs, failed = check.inferB(sig.tparams, targs) - if targs == nil { - return // error already reported - } - } - if failed >= 0 { - // at least one type argument couldn't be inferred - assert(targs[failed] == nil) - tpar := sig.tparams[failed] - // TODO(gri) here we'd like to use the position of the call's ')' - check.errorf(call.Pos(), "cannot infer %s (%s) (%s)", tpar.name, tpar.pos, targs) - return - } - } - // all type arguments were inferred sucessfully - if debug { - for _, targ := range targs { - assert(targ != nil) - } - } - //check.dump("### inferred targs = %s", targs) // compute result signature rsig = check.instantiate(call.Pos(), sig, targs, nil).(*Signature) @@ -419,15 +335,15 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, args []*o // need to compute it from the adjusted list; otherwise we can // simply use the result signature's parameter list. if adjusted { - sig_params = check.subst(call.Pos(), sig_params, makeSubstMap(sig.tparams, targs)).(*Tuple) + sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.tparams, targs)).(*Tuple) } else { - sig_params = rsig.params + sigParams = rsig.params } } // check arguments for i, a := range args { - check.assignment(a, sig_params.vars[i].typ, "argument") + check.assignment(a, sigParams.vars[i].typ, check.sprintf("argument to %s", call.Fun)) } return @@ -597,34 +513,44 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) { if m, _ := obj.(*Func); m != nil { // check.dump("### found method %s", m) check.objDecl(m, nil) - // If m has a parameterized receiver type, infer the type parameter - // values from the actual receiver provided and then substitute the - // type parameters in the signature accordingly. + // If m has a parameterized receiver type, infer the type arguments from + // the actual receiver provided and then substitute the type parameters in + // the signature accordingly. // TODO(gri) factor this code out sig := m.typ.(*Signature) if len(sig.rparams) > 0 { - //check.dump("### recv typ = %s", x.typ) + // For inference to work, we must use the receiver type + // matching the receiver in the actual method declaration. + // If the method is embedded, the matching receiver is the + // embedded struct or interface that declared the method. + // Traverse the embedding to find that type (issue #44688). + recv := x.typ + for i := 0; i < len(index)-1; i++ { + // The embedded type is either a struct or a pointer to + // a struct except for the last one (which we don't need). + recv = asStruct(derefStructPtr(recv)).Field(index[i]).typ + } + //check.dump("### recv = %s", recv) //check.dump("### method = %s rparams = %s tparams = %s", m, sig.rparams, sig.tparams) // The method may have a pointer receiver, but the actually provided receiver // may be a (hopefully addressable) non-pointer value, or vice versa. 
Here we // only care about inferring receiver type parameters; to make the inference // work, match up pointer-ness of receiver and argument. - arg := x - if ptrRecv := isPointer(sig.recv.typ); ptrRecv != isPointer(arg.typ) { - copy := *arg + if ptrRecv := isPointer(sig.recv.typ); ptrRecv != isPointer(recv) { if ptrRecv { - copy.typ = NewPointer(arg.typ) + recv = NewPointer(recv) } else { - copy.typ = arg.typ.(*Pointer).base + recv = recv.(*Pointer).base } - arg = &copy } - targs, failed := check.infer(sig.rparams, NewTuple(sig.recv), []*operand{arg}) + // Disable reporting of errors during inference below. If we're unable to infer + // the receiver type arguments here, the receiver must be otherwise invalid + // and an error has been reported elsewhere. + arg := operand{mode: variable, expr: x.expr, typ: recv} + targs := check.infer(m.pos, sig.rparams, nil, NewTuple(sig.recv), []*operand{&arg}, false /* no error reporting */) //check.dump("### inferred targs = %s", targs) - if failed >= 0 { + if targs == nil { // We may reach here if there were other errors (see issue #40056). - // check.infer will report a follow-up error. - // TODO(gri) avoid the follow-up error or provide better explanation. goto Error } // Don't modify m. Instead - for now - make a copy of m and use that instead. diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 95fb4e1076c..8d6cd1edab9 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -17,7 +16,7 @@ import ( var nopos syntax.Pos // debugging/development support -const debug = true // leave on during development +const debug = false // leave on during development // If forceStrict is set, the type-checker enforces additional // rules not specified by the Go 1 spec, but which will @@ -32,11 +31,6 @@ const debug = true // leave on during development // const forceStrict = false -// If methodTypeParamsOk is set, type parameters are -// permitted in method declarations (in interfaces, too). -// Generalization and experimental feature. -const methodTypeParamsOk = true - // exprInfo stores information about an untyped expression. type exprInfo struct { isLhs bool // expression is lhs operand of a shift with delayed type-check @@ -89,12 +83,20 @@ type Checker struct { pkg *Package *Info version version // accepted language version - nextId uint64 // unique Id for type parameters (first valid Id is 1) objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package posMap map[*Interface][]syntax.Pos // maps interface types to lists of embedded interface positions typMap map[string]*Named // maps an instantiated named type hash to a *Named type - pkgCnt map[string]int // counts number of imported packages with a given name (for better error messages) + + // pkgPathMap maps package names to the set of distinct import paths we've + // seen for that name, anywhere in the import graph. It is used for + // disambiguating package names in error messages. + // + // pkgPathMap is allocated lazily, so that we don't pay the price of building + // it on the happy path. seenPkgMap tracks the packages that we've already + // walked.
+ pkgPathMap map[string]map[string]bool + seenPkgMap map[*Package]bool // information collected during type-checking of a set of package files // (initialized by Files, valid only for the duration of check.Files; @@ -107,7 +109,6 @@ type Checker struct { methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods untyped map[syntax.Expr]exprInfo // map of expressions without final type delayed []func() // stack of delayed action segments; segments are processed in FIFO order - finals []func() // list of final actions; processed at the end of type-checking the current set of files objPath []Object // path of object dependencies during type inference (for cycle reporting) // context within which the current object is type-checked @@ -147,14 +148,6 @@ func (check *Checker) later(f func()) { check.delayed = append(check.delayed, f) } -// atEnd adds f to the list of actions processed at the end -// of type-checking, before initialization order computation. -// Actions added by atEnd are processed after any actions -// added by later. -func (check *Checker) atEnd(f func()) { - check.finals = append(check.finals, f) -} - // push pushes obj onto the object path and returns its index in the path. func (check *Checker) push(obj Object) int { check.objPath = append(check.objPath, obj) @@ -193,12 +186,10 @@ func NewChecker(conf *Config, pkg *Package, info *Info) *Checker { pkg: pkg, Info: info, version: version, - nextId: 1, objMap: make(map[Object]*declInfo), impMap: make(map[importKey]*Package), posMap: make(map[*Interface][]syntax.Pos), typMap: make(map[string]*Named), - pkgCnt: make(map[string]int), } } @@ -214,7 +205,6 @@ func (check *Checker) initFiles(files []*syntax.File) { check.methods = nil check.untyped = nil check.delayed = nil - check.finals = nil // determine package name and collect valid files pkg := check.pkg @@ -224,7 +214,7 @@ func (check *Checker) initFiles(files []*syntax.File) { if name != "_" { pkg.name = name } else { - check.errorf(file.PkgName, "invalid package name _") + check.error(file.PkgName, "invalid package name _") } fallthrough @@ -281,7 +271,6 @@ func (check *Checker) checkFiles(files []*syntax.File) (err error) { print("== processDelayed ==") check.processDelayed(0) // incl. all functions - check.processFinals() print("== initOrder ==") check.initOrder() @@ -290,9 +279,6 @@ func (check *Checker) checkFiles(files []*syntax.File) (err error) { print("== unusedImports ==") check.unusedImports() } - // no longer needed - release memory - check.imports = nil - check.dotImportMap = nil print("== recordUntyped ==") check.recordUntyped() @@ -304,6 +290,12 @@ func (check *Checker) checkFiles(files []*syntax.File) (err error) { check.pkg.complete = true + // no longer needed - release memory + check.imports = nil + check.dotImportMap = nil + check.pkgPathMap = nil + check.seenPkgMap = nil + // TODO(gri) There's more memory we should release at this point. 
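Note: the pkgPathMap/seenPkgMap fields above replace the old pkgCnt counter; a package name is qualified with its quoted full import path only when more than one distinct path has been seen for that name. A standalone sketch of that disambiguation rule (hypothetical code mirroring the map shapes, not the types2 implementation):

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		// package name -> set of distinct import paths seen for that name
		pkgPathMap := map[string]map[string]bool{}
		mark := func(name, path string) {
			if pkgPathMap[name] == nil {
				pkgPathMap[name] = map[string]bool{}
			}
			pkgPathMap[name][path] = true
		}
		mark("rand", "math/rand")
		mark("rand", "crypto/rand")
		mark("bytes", "bytes")

		qualify := func(name, path string) string {
			if len(pkgPathMap[name]) > 1 {
				return strconv.Quote(path) // ambiguous name: use the full path
			}
			return name
		}
		fmt.Println(qualify("rand", "crypto/rand")) // "crypto/rand"
		fmt.Println(qualify("bytes", "bytes"))      // bytes
	}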
return @@ -324,13 +316,30 @@ func (check *Checker) processDelayed(top int) { check.delayed = check.delayed[:top] } -func (check *Checker) processFinals() { - n := len(check.finals) - for _, f := range check.finals { - f() // must not append to check.finals +func (check *Checker) record(x *operand) { + // convert x into a user-friendly set of values + // TODO(gri) this code can be simplified + var typ Type + var val constant.Value + switch x.mode { + case invalid: + typ = Typ[Invalid] + case novalue: + typ = (*Tuple)(nil) + case constant_: + typ = x.typ + val = x.val + default: + typ = x.typ } - if len(check.finals) != n { - panic("internal error: final action list grew") + assert(x.expr != nil && typ != nil) + + if isUntyped(typ) { + // delay type and value recording until we know the type + // or until the end of type checking + check.rememberUntyped(x.expr, false, x.mode, typ.(*Basic), val) + } else { + check.recordTypeAndValue(x.expr, x.mode, typ, val) } } @@ -356,7 +365,9 @@ func (check *Checker) recordTypeAndValue(x syntax.Expr, mode operandMode, typ Ty } if mode == constant_ { assert(val != nil) - assert(typ == Typ[Invalid] || isConstType(typ)) + // We check is(typ, IsConstType) here as constant expressions may be + // recorded as type parameters. + assert(typ == Typ[Invalid] || is(typ, IsConstType)) } if m := check.Types; m != nil { m[x] = TypeAndValue{mode, typ, val} @@ -364,14 +375,14 @@ func (check *Checker) recordTypeAndValue(x syntax.Expr, mode operandMode, typ Ty } func (check *Checker) recordBuiltinType(f syntax.Expr, sig *Signature) { - // f must be a (possibly parenthesized) identifier denoting a built-in - // (built-ins in package unsafe always produce a constant result and - // we don't record their signatures, so we don't see qualified idents - // here): record the signature for f and possible children. + // f must be a (possibly parenthesized, possibly qualified) + // identifier denoting a built-in (including unsafe's non-constant + // functions Add and Slice): record the signature for f and possible + // children. for { check.recordTypeAndValue(f, builtin, sig, nil) switch p := f.(type) { - case *syntax.Name: + case *syntax.Name, *syntax.SelectorExpr: return // we're done case *syntax.ParenExpr: f = p.X diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go index 9c1d278520d..41b0c54702d 100644 --- a/src/cmd/compile/internal/types2/check_test.go +++ b/src/cmd/compile/internal/types2/check_test.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -29,12 +28,11 @@ package types2_test import ( "cmd/compile/internal/syntax" "flag" - "fmt" "internal/testenv" - "io/ioutil" "os" "path/filepath" "regexp" + "sort" "strings" "testing" @@ -42,10 +40,9 @@ import ( ) var ( - haltOnError = flag.Bool("halt", false, "halt on error") - listErrors = flag.Bool("errlist", false, "list errors") - testFiles = flag.String("files", "", "comma-separated list of test files") - goVersion = flag.String("lang", "", "Go language version (e.g. \"go1.12\"") + haltOnError = flag.Bool("halt", false, "halt on error") + verifyErrors = flag.Bool("verify", false, "verify errors (rather than list them) in TestManual") + goVersion = flag.String("lang", "", "Go language version (e.g. 
\"go1.12\")") ) func parseFiles(t *testing.T, filenames []string, mode syntax.Mode) ([]*syntax.File, []error) { @@ -73,6 +70,7 @@ func unpackError(err error) syntax.Error { } } +// delta returns the absolute difference between x and y. func delta(x, y uint) uint { switch { case x < y: @@ -98,17 +96,17 @@ func asGoVersion(s string) string { return "" } -func checkFiles(t *testing.T, sources []string, goVersion string, colDelta uint, trace bool) { - if len(sources) == 0 { +func testFiles(t *testing.T, filenames []string, colDelta uint, manual bool) { + if len(filenames) == 0 { t.Fatal("no source files") } var mode syntax.Mode - if strings.HasSuffix(sources[0], ".go2") { + if strings.HasSuffix(filenames[0], ".go2") { mode |= syntax.AllowGenerics } // parse files and collect parser errors - files, errlist := parseFiles(t, sources, mode) + files, errlist := parseFiles(t, filenames, mode) pkgName := "" if len(files) > 0 { @@ -116,11 +114,13 @@ func checkFiles(t *testing.T, sources []string, goVersion string, colDelta uint, } // if no Go version is given, consider the package name + goVersion := *goVersion if goVersion == "" { goVersion = asGoVersion(pkgName) } - if *listErrors && len(errlist) > 0 { + listErrors := manual && !*verifyErrors + if listErrors && len(errlist) > 0 { t.Errorf("--- %s:", pkgName) for _, err := range errlist { t.Error(err) @@ -130,37 +130,38 @@ func checkFiles(t *testing.T, sources []string, goVersion string, colDelta uint, // typecheck and collect typechecker errors var conf Config conf.GoVersion = goVersion - conf.AcceptMethodTypeParams = true - conf.InferFromConstraints = true // special case for importC.src - if len(sources) == 1 && strings.HasSuffix(sources[0], "importC.src") { + if len(filenames) == 1 && strings.HasSuffix(filenames[0], "importC.src") { conf.FakeImportC = true } - conf.Trace = trace + conf.Trace = manual && testing.Verbose() conf.Importer = defaultImporter() conf.Error = func(err error) { if *haltOnError { defer panic(err) } - if *listErrors { + if listErrors { t.Error(err) return } - // Ignore secondary error messages starting with "\t"; - // they are clarifying messages for a primary error. 
- if !strings.Contains(err.Error(), ": \t") { - errlist = append(errlist, err) - } + errlist = append(errlist, err) } conf.Check(pkgName, files, nil) - if *listErrors { + if listErrors { return } + // sort errlist in source order + sort.Slice(errlist, func(i, j int) bool { + pi := unpackError(errlist[i]).Pos + pj := unpackError(errlist[j]).Pos + return pi.Cmp(pj) < 0 + }) + // collect expected errors errmap := make(map[string]map[uint][]syntax.Error) - for _, filename := range sources { + for _, filename := range filenames { f, err := os.Open(filename) if err != nil { t.Error(err) @@ -179,10 +180,9 @@ func checkFiles(t *testing.T, sources []string, goVersion string, colDelta uint, // find list of errors for the respective error line filename := got.Pos.Base().Filename() filemap := errmap[filename] - var line uint + line := got.Pos.Line() var list []syntax.Error if filemap != nil { - line = got.Pos.Line() list = filemap[line] } // list may be nil @@ -213,8 +213,8 @@ func checkFiles(t *testing.T, sources []string, goVersion string, colDelta uint, // eliminate from list if n := len(list) - 1; n > 0 { - // not the last entry - swap in last element and shorten list by 1 - list[index] = list[n] + // not the last entry - slide entries down (don't reorder) + copy(list[index:], list[index+1:]) filemap[line] = list[:n] } else { // last entry - remove list from filemap @@ -240,59 +240,87 @@ func checkFiles(t *testing.T, sources []string, goVersion string, colDelta uint, } } -// TestCheck is for manual testing of selected input files, provided with -files. -// The accepted Go language version can be controlled with the -lang flag. -func TestCheck(t *testing.T) { - if *testFiles == "" { - return - } +// TestManual is for manual testing of a package - either provided +// as a list of filenames belonging to the package, or a directory +// name containing the package files - after the test arguments +// (and a separating "--"). For instance, to test the package made +// of the files foo.go and bar.go, use: +// +// go test -run Manual -- foo.go bar.go +// +// If no source arguments are provided, the file testdata/manual.go2 +// is used instead. +// Provide the -verify flag to verify errors against ERROR comments +// in the input files rather than having a list of errors reported. +// The accepted Go language version can be controlled with the -lang +// flag. 
+func TestManual(t *testing.T) { testenv.MustHaveGoBuild(t) + + filenames := flag.Args() + if len(filenames) == 0 { + filenames = []string{filepath.FromSlash("testdata/manual.go2")} + } + + info, err := os.Stat(filenames[0]) + if err != nil { + t.Fatalf("TestManual: %v", err) + } + DefPredeclaredTestFuncs() - checkFiles(t, strings.Split(*testFiles, ","), *goVersion, 0, testing.Verbose()) + if info.IsDir() { + if len(filenames) > 1 { + t.Fatal("TestManual: must have only one directory argument") + } + testDir(t, filenames[0], 0, true) + } else { + testFiles(t, filenames, 0, true) + } } -func TestTestdata(t *testing.T) { DefPredeclaredTestFuncs(); testDir(t, 75, "testdata") } // TODO(gri) narrow column tolerance -func TestExamples(t *testing.T) { testDir(t, 0, "examples") } -func TestFixedbugs(t *testing.T) { testDir(t, 0, "fixedbugs") } +// TODO(gri) go/types has extra TestLongConstants and TestIndexRepresentability tests -func testDir(t *testing.T, colDelta uint, dir string) { +func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/check", 75, false) } // TODO(gri) narrow column tolerance +func TestExamples(t *testing.T) { testDirFiles(t, "testdata/examples", 0, false) } +func TestFixedbugs(t *testing.T) { testDirFiles(t, "testdata/fixedbugs", 0, false) } + +func testDirFiles(t *testing.T, dir string, colDelta uint, manual bool) { testenv.MustHaveGoBuild(t) + dir = filepath.FromSlash(dir) - fis, err := ioutil.ReadDir(dir) + fis, err := os.ReadDir(dir) if err != nil { t.Error(err) return } - for count, fi := range fis { + for _, fi := range fis { path := filepath.Join(dir, fi.Name()) - // if fi is a directory, its files make up a single package + // If fi is a directory, its files make up a single package. if fi.IsDir() { - if testing.Verbose() { - fmt.Printf("%3d %s\n", count, path) - } - fis, err := ioutil.ReadDir(path) - if err != nil { - t.Error(err) - continue - } - files := make([]string, len(fis)) - for i, fi := range fis { - // if fi is a directory, checkFiles below will complain - files[i] = filepath.Join(path, fi.Name()) - if testing.Verbose() { - fmt.Printf("\t%s\n", files[i]) - } - } - checkFiles(t, files, "", colDelta, false) - continue + testDir(t, path, colDelta, manual) + } else { + t.Run(filepath.Base(path), func(t *testing.T) { + testFiles(t, []string{path}, colDelta, manual) + }) } - - // otherwise, fi is a stand-alone file - if testing.Verbose() { - fmt.Printf("%3d %s\n", count, path) - } - checkFiles(t, []string{path}, "", colDelta, false) } } + +func testDir(t *testing.T, dir string, colDelta uint, manual bool) { + fis, err := os.ReadDir(dir) + if err != nil { + t.Error(err) + return + } + + var filenames []string + for _, fi := range fis { + filenames = append(filenames, filepath.Join(dir, fi.Name())) + } + + t.Run(filepath.Base(dir), func(t *testing.T) { + testFiles(t, filenames, colDelta, manual) + }) +} diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go index dc0621919e5..30201e2b7f4 100644 --- a/src/cmd/compile/internal/types2/conversions.go +++ b/src/cmd/compile/internal/types2/conversions.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
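Note: the conversions.go hunk just below adds the Go 1.17 rule permitting conversion of a slice to an array pointer when the element types are identical, gated on the effective language version. Illustrative user code that the new rule accepts (a sketch, not part of the patch; requires go1.17):

	package main

	import "fmt"

	func main() {
		s := []byte{1, 2, 3, 4}

		// Legal as of go1.17: []byte -> *[4]byte, identical element types.
		// Panics at run time if len(s) < 4; the array pointer aliases s's
		// backing array, so writes through p are visible via s.
		p := (*[4]byte)(s)
		p[0] = 9
		fmt.Println(s[0]) // 9
	}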
@@ -39,8 +38,10 @@ func (check *Checker) conversion(x *operand, T Type) { } if !ok { - check.errorf(x, "cannot convert %s to %s", x, T) - x.mode = invalid + if x.mode != invalid { + check.errorf(x, "cannot convert %s to %s", x, T) + x.mode = invalid + } return } @@ -84,7 +85,7 @@ func (check *Checker) conversion(x *operand, T Type) { // exported API call, i.e., when all methods have been type-checked. func (x *operand) convertibleTo(check *Checker, T Type) bool { // "x is assignable to T" - if x.assignableTo(check, T, nil) { + if ok, _ := x.assignableTo(check, T, nil); ok { return true } @@ -136,6 +137,27 @@ func (x *operand) convertibleTo(check *Checker, T Type) bool { return true } + // "x is a slice, T is a pointer-to-array type, + // and the slice and array types have identical element types." + if s := asSlice(V); s != nil { + if p := asPointer(T); p != nil { + if a := asArray(p.Elem()); a != nil { + if check.identical(s.Elem(), a.Elem()) { + if check == nil || check.allowVersion(check.pkg, 1, 17) { + return true + } + // check != nil + if check.conf.CompilerErrorMessages { + check.error(x, "conversion of slices to array pointers only supported as of -lang=go1.17") + } else { + check.error(x, "conversion of slices to array pointers requires go1.17 or later") + } + x.mode = invalid // avoid follow-up error + } + } + } + } + return false } diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go index 677172d40f9..1333e4c0eca 100644 --- a/src/cmd/compile/internal/types2/decl.go +++ b/src/cmd/compile/internal/types2/decl.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -11,12 +10,12 @@ import ( "go/constant" ) -func (check *Checker) reportAltDecl(obj Object) { +func (err *error_) recordAltDecl(obj Object) { if pos := obj.Pos(); pos.IsKnown() { // We use "other" rather than "previous" here because // the first declaration seen may not be textually // earlier in the source. - check.errorf(pos, "\tother declaration of %s", obj.Name()) // secondary error, \t indented + err.errorf(pos, "other declaration of %s", obj.Name()) } } @@ -27,8 +26,10 @@ func (check *Checker) declare(scope *Scope, id *syntax.Name, obj Object, pos syn // binding." if obj.Name() != "_" { if alt := scope.Insert(obj); alt != nil { - check.errorf(obj.Pos(), "%s redeclared in this block", obj.Name()) - check.reportAltDecl(alt) + var err error_ + err.errorf(obj, "%s redeclared in this block", obj.Name()) + err.recordAltDecl(alt) + check.report(&err) return } obj.setScopePos(pos) @@ -323,7 +324,7 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo { } // don't report a 2nd error if we already know the type is invalid - // (e.g., if a cycle was detected earlier, via Checker.underlying). + // (e.g., if a cycle was detected earlier, via under). if t.underlying == Typ[Invalid] { t.info = invalid return invalid @@ -332,7 +333,7 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo { switch t.info { case unknown: t.info = marked - t.info = check.validType(t.orig, append(path, t.obj)) // only types of current package added to path + t.info = check.validType(t.fromRHS, append(path, t.obj)) // only types of current package added to path case marked: // cycle detected for i, tn := range path { @@ -364,53 +365,22 @@ func (check *Checker) cycleError(cycle []Object) { // cycle? 
That would be more consistent with other error messages. i := firstInSrc(cycle) obj := cycle[i] + var err error_ if check.conf.CompilerErrorMessages { - check.errorf(obj.Pos(), "invalid recursive type %s", obj.Name()) + err.errorf(obj, "invalid recursive type %s", obj.Name()) } else { - check.errorf(obj.Pos(), "illegal cycle in declaration of %s", obj.Name()) + err.errorf(obj, "illegal cycle in declaration of %s", obj.Name()) } for range cycle { - check.errorf(obj.Pos(), "\t%s refers to", obj.Name()) // secondary error, \t indented + err.errorf(obj, "%s refers to", obj.Name()) i++ if i >= len(cycle) { i = 0 } obj = cycle[i] } - check.errorf(obj.Pos(), "\t%s", obj.Name()) -} - -// TODO(gri) This functionality should probably be with the Pos implementation. -func cmpPos(p, q syntax.Pos) int { - // TODO(gri) is RelFilename correct here? - pname := p.RelFilename() - qname := q.RelFilename() - switch { - case pname < qname: - return -1 - case pname > qname: - return +1 - } - - pline := p.Line() - qline := q.Line() - switch { - case pline < qline: - return -1 - case pline > qline: - return +1 - } - - pcol := p.Col() - qcol := q.Col() - switch { - case pcol < qcol: - return -1 - case pcol > qcol: - return +1 - } - - return 0 + err.errorf(obj, "%s", obj.Name()) + check.report(&err) } // firstInSrc reports the index of the object with the "smallest" @@ -418,7 +388,7 @@ func cmpPos(p, q syntax.Pos) int { func firstInSrc(path []Object) int { fst, pos := 0, path[0].Pos() for i, t := range path[1:] { - if cmpPos(t.Pos(), pos) < 0 { + if t.Pos().Cmp(pos) < 0 { fst, pos = i+1, t.Pos() } } @@ -625,14 +595,18 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named if alias && tdecl.TParamList != nil { // The parser will ensure this but we may still get an invalid AST. // Complain and continue as regular type definition. - check.errorf(tdecl, "generic type cannot be alias") + check.error(tdecl, "generic type cannot be alias") alias = false } if alias { // type alias declaration - if !check.allowVersion(obj.pkg, 1, 9) { - check.errorf(tdecl, "type aliases requires go1.9 or later") + if !check.allowVersion(check.pkg, 1, 9) { + if check.conf.CompilerErrorMessages { + check.error(tdecl, "type aliases only supported as of -lang=go1.9") + } else { + check.error(tdecl, "type aliases requires go1.9 or later") + } } obj.typ = Typ[Invalid] @@ -641,9 +615,8 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named } else { // defined type declaration - named := &Named{check: check, obj: obj} + named := check.newNamed(obj, nil, nil, nil, nil) def.setUnderlying(named) - obj.typ = named // make sure recursive type declarations terminate if tdecl.TParamList != nil { check.openScope(tdecl, "type parameters") @@ -652,7 +625,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named } // determine underlying type of named - named.orig = check.definedType(tdecl.Type, named) + named.fromRHS = check.definedType(tdecl.Type, named) // The underlying type of named may be itself a named type that is // incomplete: @@ -667,7 +640,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named // and which has as its underlying type the named type B. // Determine the (final, unnamed) underlying type by resolving // any forward chain. - // TODO(gri) Investigate if we can just use named.origin here + // TODO(gri) Investigate if we can just use named.fromRHS here // and rely on lazy computation of the underlying type. 
named.underlying = under(named) } @@ -707,7 +680,7 @@ func (check *Checker) collectTypeParams(list []*syntax.Field) (tparams []*TypeNa // The predeclared identifier "any" is visible only as a constraint // in a type parameter list. Look for it before general constraint // resolution. - if tident, _ := f.Type.(*syntax.Name); tident != nil && tident.Value == "any" && check.lookup("any") == nil { + if tident, _ := unparen(f.Type).(*syntax.Name); tident != nil && tident.Value == "any" && check.lookup("any") == nil { bound = universeAny } else { bound = check.typ(f.Type) @@ -787,19 +760,21 @@ func (check *Checker) collectMethods(obj *TypeName) { // to it must be unique." assert(m.name != "_") if alt := mset.insert(m); alt != nil { + var err error_ switch alt.(type) { case *Var: - check.errorf(m.pos, "field and method with the same name %s", m.name) + err.errorf(m.pos, "field and method with the same name %s", m.name) case *Func: if check.conf.CompilerErrorMessages { - check.errorf(m.pos, "%s.%s redeclared in this block", obj.Name(), m.name) + err.errorf(m.pos, "%s.%s redeclared in this block", obj.Name(), m.name) } else { - check.errorf(m.pos, "method %s already declared for %s", m.name, obj) + err.errorf(m.pos, "method %s already declared for %s", m.name, obj) } default: unreachable() } - check.reportAltDecl(alt) + err.recordAltDecl(alt) + check.report(&err) continue } @@ -896,7 +871,7 @@ func (check *Checker) declStmt(list []syntax.Decl) { // inside a function begins at the end of the ConstSpec or VarSpec // (ShortVarDecl for short variable declarations) and ends at the // end of the innermost containing block." - scopePos := endPos(s) + scopePos := syntax.EndPos(s) for i, name := range s.NameList { check.declare(check.scope, name, lhs[i], scopePos) } @@ -953,7 +928,7 @@ func (check *Checker) declStmt(list []syntax.Decl) { // declare all variables // (only at this point are the variable scopes (parents) set) - scopePos := endPos(s) // see constant declarations + scopePos := syntax.EndPos(s) // see constant declarations for i, name := range s.NameList { // see constant declarations check.declare(check.scope, name, lhs0[i], scopePos) @@ -972,7 +947,7 @@ func (check *Checker) declStmt(list []syntax.Decl) { check.pop().setColor(black) default: - check.invalidASTf(s, "unknown syntax.Decl node %T", s) + check.errorf(s, invalidAST+"unknown syntax.Decl node %T", s) } } } diff --git a/src/cmd/compile/internal/types2/errorcalls_test.go b/src/cmd/compile/internal/types2/errorcalls_test.go new file mode 100644 index 00000000000..28bb33aaffd --- /dev/null +++ b/src/cmd/compile/internal/types2/errorcalls_test.go @@ -0,0 +1,49 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "cmd/compile/internal/syntax" + "testing" +) + +// TestErrorCalls makes sure that check.errorf calls have at +// least 3 arguments (otherwise we should be using check.error).
+func TestErrorCalls(t *testing.T) { + files, err := pkgFiles(".") + if err != nil { + t.Fatal(err) + } + + for _, file := range files { + syntax.Walk(file, func(n syntax.Node) bool { + call, _ := n.(*syntax.CallExpr) + if call == nil { + return false + } + selx, _ := call.Fun.(*syntax.SelectorExpr) + if selx == nil { + return false + } + if !(isName(selx.X, "check") && isName(selx.Sel, "errorf")) { + return false + } + // check.errorf calls should have more than 2 arguments: + // position, format string, and arguments to format + if n := len(call.ArgList); n <= 2 { + t.Errorf("%s: got %d arguments, want > 2", call.Pos(), n) + return true + } + return false + }) + } +} + +func isName(n syntax.Node, name string) bool { + if n, ok := n.(*syntax.Name); ok { + return n.Value == name + } + return false +} diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go index 62b1d39d836..af4ecb2300a 100644 --- a/src/cmd/compile/internal/types2/errors.go +++ b/src/cmd/compile/internal/types2/errors.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -29,19 +28,61 @@ func unreachable() { panic("unreachable") } -func (check *Checker) qualifier(pkg *Package) string { - // Qualify the package unless it's the package being type-checked. - if pkg != check.pkg { - // If the same package name was used by multiple packages, display the full path. - if check.pkgCnt[pkg.name] > 1 { - return strconv.Quote(pkg.path) - } - return pkg.name - } - return "" +// An error_ represents a type-checking error. +// To report an error_, call Checker.report. +type error_ struct { + desc []errorDesc + soft bool // TODO(gri) eventually determine this from an error code } -func (check *Checker) sprintf(format string, args ...interface{}) string { +// An errorDesc describes part of a type-checking error. +type errorDesc struct { + pos syntax.Pos + format string + args []interface{} +} + +func (err *error_) empty() bool { + return err.desc == nil +} + +func (err *error_) pos() syntax.Pos { + if err.empty() { + return nopos + } + return err.desc[0].pos +} + +func (err *error_) msg(qf Qualifier) string { + if err.empty() { + return "no error" + } + var buf bytes.Buffer + for i := range err.desc { + p := &err.desc[i] + if i > 0 { + fmt.Fprintf(&buf, "\n\t%s: ", p.pos) + } + buf.WriteString(sprintf(qf, p.format, p.args...)) + } + return buf.String() +} + +// String is for testing. +func (err *error_) String() string { + if err.empty() { + return "no error" + } + return fmt.Sprintf("%s: %s", err.pos(), err.msg(nil)) +} + +// errorf adds formatted error information to err. +// It may be called multiple times to provide additional information. 
+func (err *error_) errorf(at poser, format string, args ...interface{}) { + err.desc = append(err.desc, errorDesc{posFor(at), format, args}) +} + +func sprintf(qf Qualifier, format string, args ...interface{}) string { for i, arg := range args { switch a := arg.(type) { case nil: @@ -49,21 +90,69 @@ func (check *Checker) sprintf(format string, args ...interface{}) string { case operand: panic("internal error: should always pass *operand") case *operand: - arg = operandString(a, check.qualifier) + arg = operandString(a, qf) case syntax.Pos: arg = a.String() case syntax.Expr: arg = syntax.String(a) case Object: - arg = ObjectString(a, check.qualifier) + arg = ObjectString(a, qf) case Type: - arg = TypeString(a, check.qualifier) + arg = TypeString(a, qf) } args[i] = arg } return fmt.Sprintf(format, args...) } +func (check *Checker) qualifier(pkg *Package) string { + // Qualify the package unless it's the package being type-checked. + if pkg != check.pkg { + if check.pkgPathMap == nil { + check.pkgPathMap = make(map[string]map[string]bool) + check.seenPkgMap = make(map[*Package]bool) + check.markImports(pkg) + } + // If the same package name was used by multiple packages, display the full path. + if len(check.pkgPathMap[pkg.name]) > 1 { + return strconv.Quote(pkg.path) + } + return pkg.name + } + return "" +} + +// markImports recursively walks pkg and its imports, to record unique import +// paths in pkgPathMap. +func (check *Checker) markImports(pkg *Package) { + if check.seenPkgMap[pkg] { + return + } + check.seenPkgMap[pkg] = true + + forName, ok := check.pkgPathMap[pkg.name] + if !ok { + forName = make(map[string]bool) + check.pkgPathMap[pkg.name] = forName + } + forName[pkg.path] = true + + for _, imp := range pkg.imports { + check.markImports(imp) + } +} + +func (check *Checker) sprintf(format string, args ...interface{}) string { + return sprintf(check.qualifier, format, args...) +} + +func (check *Checker) report(err *error_) { + if err.empty() { + panic("internal error: reporting no error") + } + check.err(err.pos(), err.msg(check.qualifier), err.soft) +} + func (check *Checker) trace(pos syntax.Pos, format string, args ...interface{}) { fmt.Printf("%s:\t%s%s\n", pos, @@ -77,7 +166,7 @@ func (check *Checker) dump(format string, args ...interface{}) { fmt.Println(check.sprintf(format, args...)) } -func (check *Checker) err(pos syntax.Pos, msg string, soft bool) { +func (check *Checker) err(at poser, msg string, soft bool) { // Cheap trick: Don't report errors with messages containing // "invalid operand" or "invalid type" as those tend to be // follow-on errors which don't add useful information. 
Only @@ -87,6 +176,8 @@ func (check *Checker) err(pos syntax.Pos, msg string, soft bool) { return } + pos := posFor(at) + // If we are encountering an error while evaluating an inherited // constant initialization expression, pos is the position in // the original expression, and not of the currently declared @@ -114,32 +205,26 @@ func (check *Checker) err(pos syntax.Pos, msg string, soft bool) { f(err) } +const ( + invalidAST = "invalid AST: " + invalidArg = "invalid argument: " + invalidOp = "invalid operation: " +) + type poser interface { Pos() syntax.Pos } func (check *Checker) error(at poser, msg string) { - check.err(posFor(at), msg, false) + check.err(at, msg, false) } func (check *Checker) errorf(at poser, format string, args ...interface{}) { - check.err(posFor(at), check.sprintf(format, args...), false) + check.err(at, check.sprintf(format, args...), false) } func (check *Checker) softErrorf(at poser, format string, args ...interface{}) { - check.err(posFor(at), check.sprintf(format, args...), true) -} - -func (check *Checker) invalidASTf(at poser, format string, args ...interface{}) { - check.errorf(at, "invalid AST: "+format, args...) -} - -func (check *Checker) invalidArgf(at poser, format string, args ...interface{}) { - check.errorf(at, "invalid argument: "+format, args...) -} - -func (check *Checker) invalidOpf(at poser, format string, args ...interface{}) { - check.errorf(at, "invalid operation: "+format, args...) + check.err(at, check.sprintf(format, args...), true) } // posFor reports the left (= start) position of at. @@ -147,10 +232,10 @@ func posFor(at poser) syntax.Pos { switch x := at.(type) { case *operand: if x.expr != nil { - return startPos(x.expr) + return syntax.StartPos(x.expr) } case syntax.Node: - return startPos(x) + return syntax.StartPos(x) } return at.Pos() } diff --git a/src/cmd/compile/internal/types2/errors_test.go b/src/cmd/compile/internal/types2/errors_test.go index cb21ff1ad3d..e1f0e83fc97 100644 --- a/src/cmd/compile/internal/types2/errors_test.go +++ b/src/cmd/compile/internal/types2/errors_test.go @@ -6,6 +6,26 @@ package types2 import "testing" +func TestError(t *testing.T) { + var err error_ + want := "no error" + if got := err.String(); got != want { + t.Errorf("empty error: got %q, want %q", got, want) + } + + want = ": foo 42" + err.errorf(nopos, "foo %d", 42) + if got := err.String(); got != want { + t.Errorf("simple error: got %q, want %q", got, want) + } + + want = ": foo 42\n\t: bar 43" + err.errorf(nopos, "bar %d", 43) + if got := err.String(); got != want { + t.Errorf("simple error: got %q, want %q", got, want) + } +} + func TestStripAnnotations(t *testing.T) { for _, test := range []struct { in, want string diff --git a/src/cmd/compile/internal/types2/example_test.go b/src/cmd/compile/internal/types2/example_test.go index ffd54fe4591..714bf778213 100644 --- a/src/cmd/compile/internal/types2/example_test.go +++ b/src/cmd/compile/internal/types2/example_test.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,6 +5,7 @@ // Only run where builders (build.golang.org) have // access to compiled packages for import.
// +//go:build !arm && !arm64 // +build !arm,!arm64 package types2_test diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go index 584b8ee6a01..23b79656bb5 100644 --- a/src/cmd/compile/internal/types2/expr.go +++ b/src/cmd/compile/internal/types2/expr.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -75,14 +74,14 @@ func (check *Checker) op(m opPredicates, x *operand, op syntax.Operator) bool { if pred := m[op]; pred != nil { if !pred(x.typ) { if check.conf.CompilerErrorMessages { - check.invalidOpf(x, "operator %s not defined on %s", op, x) + check.errorf(x, invalidOp+"operator %s not defined on %s", op, x) } else { - check.invalidOpf(x, "operator %s not defined for %s", op, x) + check.errorf(x, invalidOp+"operator %s not defined for %s", op, x) } return false } } else { - check.invalidASTf(x, "unknown operator %s", op) + check.errorf(x, invalidAST+"unknown operator %s", op) return false } return true @@ -97,7 +96,7 @@ func (check *Checker) overflow(x *operand) { // If the corresponding expression is an operation, use the // operator position rather than the start of the expression // as error position. - pos := startPos(x.expr) + pos := syntax.StartPos(x.expr) what := "" // operator description, if any if op, _ := x.expr.(*syntax.Operation); op != nil { pos = op.Pos() @@ -108,7 +107,7 @@ func (check *Checker) overflow(x *operand) { // TODO(gri) We should report exactly what went wrong. At the // moment we don't have the (go/constant) API for that. // See also TODO in go/constant/value.go. - check.errorf(pos, "constant result is not representable") + check.error(pos, "constant result is not representable") return } @@ -145,11 +144,11 @@ func opName(e *syntax.Operation) string { return "" } -// Entries must be "" or end with a space. var op2str1 = [...]string{ syntax.Xor: "bitwise complement", } +// This is only used for operations that may cause overflow. var op2str2 = [...]string{ syntax.Add: "addition", syntax.Sub: "subtraction", @@ -169,7 +168,7 @@ func (check *Checker) unary(x *operand, e *syntax.Operation) { // spec: "As an exception to the addressability // requirement x may also be a composite literal." if _, ok := unparen(e.X).(*syntax.CompositeLit); !ok && x.mode != variable { - check.invalidOpf(x, "cannot take address of %s", x) + check.errorf(x, invalidOp+"cannot take address of %s", x) x.mode = invalid return } @@ -180,12 +179,12 @@ func (check *Checker) unary(x *operand, e *syntax.Operation) { case syntax.Recv: typ := asChan(x.typ) if typ == nil { - check.invalidOpf(x, "cannot receive from non-channel %s", x) + check.errorf(x, invalidOp+"cannot receive from non-channel %s", x) x.mode = invalid return } if typ.dir == SendOnly { - check.invalidOpf(x, "cannot receive from send-only channel %s", x) + check.errorf(x, invalidOp+"cannot receive from send-only channel %s", x) x.mode = invalid return } @@ -410,11 +409,51 @@ func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *c return false } -// representable checks that a constant operand is representable in the given basic type. +// An errorCode is a (constant) value uniquely identifying a specific error. +type errorCode int + +// The following error codes are "borrowed" from go/types, which has codes for +// all errors. Here we list the few codes currently needed by the various +// conversion checking functions.
+// Eventually we will switch to reporting codes for all errors, using a +// an error code table shared between types2 and go/types. +const ( + _ = errorCode(iota) + _TruncatedFloat + _NumericOverflow + _InvalidConstVal + _InvalidUntypedConversion + + // The following error codes are only returned by operand.assignableTo + // and none of its callers use the error. Still, we keep returning the + // error codes to make the transition to reporting error codes all the + // time easier in the future. + _IncompatibleAssign + _InvalidIfaceAssign + _InvalidChanAssign +) + +// representable checks that a constant operand is representable in the given +// basic type. func (check *Checker) representable(x *operand, typ *Basic) { + v, code := check.representation(x, typ) + if code != 0 { + check.invalidConversion(code, x, typ) + x.mode = invalid + return + } + assert(v != nil) + x.val = v +} + +// representation returns the representation of the constant operand x as the +// basic type typ. +// +// If no such representation is possible, it returns a non-zero error code. +func (check *Checker) representation(x *operand, typ *Basic) (constant.Value, errorCode) { assert(x.mode == constant_) - if !representableConst(x.val, check, typ, &x.val) { - var msg string + v := x.val + if !representableConst(x.val, check, typ, &v) { if isNumeric(x.typ) && isNumeric(typ) { // numeric conversion : error msg // @@ -424,16 +463,25 @@ func (check *Checker) representable(x *operand, typ *Basic) { // float -> float : overflows // if !isInteger(x.typ) && isInteger(typ) { - msg = "%s truncated to %s" + return nil, _TruncatedFloat } else { - msg = "%s overflows %s" + return nil, _NumericOverflow } - } else { - msg = "cannot convert %s to %s" } - check.errorf(x, msg, x, typ) - x.mode = invalid + return nil, _InvalidConstVal } + return v, 0 +} + +func (check *Checker) invalidConversion(code errorCode, x *operand, target Type) { + msg := "cannot convert %s to %s" + switch code { + case _TruncatedFloat: + msg = "%s truncated to %s" + case _NumericOverflow: + msg = "%s overflows %s" + } + check.errorf(x, msg, x, target) } // updateExprType updates the type of x to typ and invokes itself @@ -460,6 +508,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) { *syntax.IndexExpr, *syntax.SliceExpr, *syntax.AssertExpr, + *syntax.ListExpr, //*syntax.StarExpr, *syntax.KeyValueExpr, *syntax.ArrayType, @@ -562,7 +611,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) { // We already know from the shift check that it is representable // as an integer if it is a constant. if !isInteger(typ) { - check.invalidOpf(x, "shifted operand %s (type %s) must be integer", x, typ) + check.errorf(x, invalidOp+"shifted operand %s (type %s) must be integer", x, typ) return } // Even if we have an integer, if the value is a constant we @@ -592,13 +641,33 @@ func (check *Checker) updateExprVal(x syntax.Expr, val constant.Value) { // convertUntyped attempts to set the type of an untyped value to the target type. 
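representation now separates detecting an unrepresentable constant from reporting it: detection returns an errorCode, and invalidConversion is the single place that turns codes into messages, so callers (such as implicitTypeAndValue below) can consume the code without emitting anything. A reduced sketch of the same detect/report split, built on go/constant; the names mirror the diff, but the signatures and bit-width logic here are simplified stand-ins, not the checker's:

```go
package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

type errorCode int

const (
	_ errorCode = iota
	_TruncatedFloat
	_NumericOverflow
	_InvalidConstVal
)

// representation is the detection half: it reports whether constant v
// fits a signed integer of the given bit width, returning a non-zero
// code (and no value) on failure. No error is emitted here.
func representation(v constant.Value, bits uint) (constant.Value, errorCode) {
	if v.Kind() == constant.Float {
		w := constant.ToInt(v)
		if w.Kind() != constant.Int {
			return nil, _TruncatedFloat // e.g. 1.5 has no integer representation
		}
		v = w // e.g. 1.0 rounds cleanly to 1
	}
	if v.Kind() != constant.Int {
		return nil, _InvalidConstVal
	}
	limit := int64(1) << (bits - 1)
	if i, exact := constant.Int64Val(v); !exact || i < -limit || i >= limit {
		return nil, _NumericOverflow
	}
	return v, 0
}

// invalidConversion is the reporting half: one place maps codes to
// user-facing messages, mirroring the switch in the diff.
func invalidConversion(code errorCode, v constant.Value, target string) string {
	msg := "cannot convert %s to %s"
	switch code {
	case _TruncatedFloat:
		msg = "%s truncated to %s"
	case _NumericOverflow:
		msg = "%s overflows %s"
	}
	return fmt.Sprintf(msg, v, target)
}

func main() {
	v := constant.MakeFromLiteral("300", token.INT, 0)
	if _, code := representation(v, 8); code != 0 {
		fmt.Println(invalidConversion(code, v, "int8")) // 300 overflows int8
	}
}
```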
func (check *Checker) convertUntyped(x *operand, target Type) { - target = expand(target) - if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] { + newType, val, code := check.implicitTypeAndValue(x, target) + if code != 0 { + check.invalidConversion(code, x, target.Underlying()) + x.mode = invalid return } + if val != nil { + x.val = val + check.updateExprVal(x.expr, val) + } + if newType != x.typ { + x.typ = newType + check.updateExprType(x.expr, newType, false) + } +} - // TODO(gri) Sloppy code - clean up. This function is central - // to assignment and expression checking. +// implicitTypeAndValue returns the implicit type of x when used in a context +// where the target type is expected. If no such implicit conversion is +// possible, it returns a nil Type and non-zero error code. +// +// If x is a constant operand, the returned constant.Value will be the +// representation of x in this context. +func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, constant.Value, errorCode) { + target = expand(target) + if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] { + return x.typ, nil, 0 + } if isUntyped(target) { // both x and target are untyped @@ -606,125 +675,84 @@ func (check *Checker) convertUntyped(x *operand, target Type) { tkind := target.(*Basic).kind if isNumeric(x.typ) && isNumeric(target) { if xkind < tkind { - x.typ = target - check.updateExprType(x.expr, target, false) + return target, nil, 0 } } else if xkind != tkind { - goto Error + return nil, nil, _InvalidUntypedConversion } - return + return x.typ, nil, 0 } - // In case of a type parameter, conversion must succeed against - // all types enumerated by the type parameter bound. - // TODO(gri) We should not need this because we have the code - // for Sum types in convertUntypedInternal. But at least one - // test fails. Investigate. - if t := asTypeParam(target); t != nil { - types := t.Bound().allTypes - if types == nil { - goto Error - } - - for _, t := range unpack(types) { - x := *x // make a copy; convertUntypedInternal modifies x - check.convertUntypedInternal(&x, t) - if x.mode == invalid { - goto Error - } - } - - x.typ = target - check.updateExprType(x.expr, target, true) - return - } - - check.convertUntypedInternal(x, target) - return - -Error: - // TODO(gri) better error message (explain cause) - check.errorf(x, "cannot convert %s to %s", x, target) - x.mode = invalid -} - -// convertUntypedInternal should only be called by convertUntyped. -func (check *Checker) convertUntypedInternal(x *operand, target Type) { - assert(isTyped(target)) - if x.isNil() { assert(isUntyped(x.typ)) if hasNil(target) { - goto OK + return target, nil, 0 } - goto Error + return nil, nil, _InvalidUntypedConversion } - // typed target switch t := optype(target).(type) { case *Basic: if x.mode == constant_ { - check.representable(x, t) - if x.mode == invalid { - return + v, code := check.representation(x, t) + if code != 0 { + return nil, nil, code } - // expression value may have been rounded - update if needed - check.updateExprVal(x.expr, x.val) - } else { - // Non-constant untyped values may appear as the - // result of comparisons (untyped bool), intermediate - // (delayed-checked) rhs operands of shifts, and as - // the value nil. Nil was handled upfront. 
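convertUntyped is now a thin wrapper: implicitTypeAndValue computes the implicit type, rounded constant value, and error code, and the wrapper just applies them to the operand. The Default fallback it relies on for untyped operands (and which reappears in the interface case below) is observable through the exported go/types API, which mirrors types2 on this point:

```go
package main

import (
	"fmt"
	"go/types"
)

func main() {
	// Default yields the type an untyped operand assumes when the
	// context does not impose one.
	fmt.Println(types.Default(types.Typ[types.UntypedInt]))   // int
	fmt.Println(types.Default(types.Typ[types.UntypedFloat])) // float64
	fmt.Println(types.Default(types.Typ[types.UntypedBool]))  // bool
	fmt.Println(types.Default(types.Typ[types.UntypedNil]))   // untyped nil (stays untyped)
}
```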
- switch x.typ.(*Basic).kind { - case UntypedBool: - if !isBoolean(target) { - goto Error - } - case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex: - if !isNumeric(target) { - goto Error - } - case UntypedString: - // Non-constant untyped string values are not - // permitted by the spec and should not occur. - unreachable() - default: - goto Error + return target, v, code + } + // Non-constant untyped values may appear as the + // result of comparisons (untyped bool), intermediate + // (delayed-checked) rhs operands of shifts, and as + // the value nil. + switch x.typ.(*Basic).kind { + case UntypedBool: + if !isBoolean(target) { + return nil, nil, _InvalidUntypedConversion } + case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex: + if !isNumeric(target) { + return nil, nil, _InvalidUntypedConversion + } + case UntypedString: + // Non-constant untyped string values are not permitted by the spec and + // should not occur during normal typechecking passes, but this path is + // reachable via the AssignableTo API. + if !isString(target) { + return nil, nil, _InvalidUntypedConversion + } + default: + return nil, nil, _InvalidUntypedConversion } case *Sum: - t.is(func(t Type) bool { - check.convertUntypedInternal(x, t) - return x.mode != invalid + ok := t.is(func(t Type) bool { + target, _, _ := check.implicitTypeAndValue(x, t) + return target != nil }) + if !ok { + return nil, nil, _InvalidUntypedConversion + } case *Interface: - // Update operand types to the default type rather then the target + // Update operand types to the default type rather than the target // (interface) type: values must have concrete dynamic types. // Untyped nil was handled upfront. check.completeInterface(nopos, t) if !t.Empty() { - goto Error // cannot assign untyped values to non-empty interfaces + return nil, nil, _InvalidUntypedConversion // cannot assign untyped values to non-empty interfaces } - target = Default(x.typ) + return Default(x.typ), nil, 0 // default type for nil is nil default: - goto Error + return nil, nil, _InvalidUntypedConversion } - -OK: - x.typ = target - check.updateExprType(x.expr, target, true) - return - -Error: - check.errorf(x, "cannot convert %s to %s", x, target) - x.mode = invalid + return target, nil, 0 } func (check *Checker) comparison(x, y *operand, op syntax.Operator) { // spec: "In any comparison, the first operand must be assignable // to the type of the second operand, or vice versa." err := "" - if x.assignableTo(check, y.typ, nil) || y.assignableTo(check, x.typ, nil) { + xok, _ := x.assignableTo(check, y.typ, nil) + yok, _ := y.assignableTo(check, x.typ, nil) + if xok || yok { defined := false switch op { case syntax.Eql, syntax.Neq: @@ -753,7 +781,7 @@ func (check *Checker) comparison(x, y *operand, op syntax.Operator) { if err != "" { // TODO(gri) better error message for cases where one can only compare against nil - check.invalidOpf(x, "cannot compare %s %s %s (%s)", x.expr, op, y.expr, err) + check.errorf(x, invalidOp+"cannot compare %s %s %s (%s)", x.expr, op, y.expr, err) x.mode = invalid return } @@ -791,7 +819,7 @@ func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) { // as an integer. Nothing to do. 
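Stepping back to the comparison change above: the spec rule it enforces ("the first operand must be assignable to the type of the second operand, or vice versa") plays out like this in ordinary Go source, with no assumptions beyond the spec:

```go
package main

type MyInt int

func main() {
	var x MyInt = 1
	var y = 1 // int

	_ = x == 1        // ok: untyped constant 1 is assignable to MyInt
	_ = x == MyInt(y) // ok: identical types after conversion
	// _ = x == y     // invalid operation: mismatched types MyInt and int

	var s []int
	_ = s == nil // ok: nil is assignable to []int (slices compare only to nil)
}
```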
} else { // shift has no chance - check.invalidOpf(x, "shifted operand %s must be integer", x) + check.errorf(x, invalidOp+"shifted operand %s must be integer", x) x.mode = invalid return } @@ -803,7 +831,7 @@ func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) { if y.mode == constant_ { yval := constant.ToInt(y.val) // consider -1, 1.0, but not -1.1 if yval.Kind() == constant.Int && constant.Sign(yval) < 0 { - check.invalidOpf(y, "negative shift count %s", y) + check.errorf(y, invalidOp+"negative shift count %s", y) x.mode = invalid return } @@ -818,11 +846,11 @@ func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) { return } } else if !isInteger(y.typ) { - check.invalidOpf(y, "shift count %s must be integer", y) + check.errorf(y, invalidOp+"shift count %s must be integer", y) x.mode = invalid return } else if !isUnsigned(y.typ) && !check.allowVersion(check.pkg, 1, 13) { - check.invalidOpf(y, "signed shift count %s requires go1.13 or later", y) + check.errorf(y, invalidOp+"signed shift count %s requires go1.13 or later", y) x.mode = invalid return } @@ -842,7 +870,7 @@ func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) { const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64 (see issue #44057) s, ok := constant.Uint64Val(y.val) if !ok || s > shiftBound { - check.invalidOpf(y, "invalid shift count %s", y) + check.errorf(y, invalidOp+"invalid shift count %s", y) x.mode = invalid return } @@ -893,7 +921,7 @@ func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) { // non-constant shift - lhs must be an integer if !isInteger(x.typ) { - check.invalidOpf(x, "shifted operand %s must be integer", x) + check.errorf(x, invalidOp+"shifted operand %s must be integer", x) x.mode = invalid return } @@ -963,7 +991,7 @@ func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op // only report an error if we have valid types // (otherwise we had an error reported elsewhere already) if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] { - check.invalidOpf(x, "mismatched types %s and %s", x.typ, y.typ) + check.errorf(x, invalidOp+"mismatched types %s and %s", x.typ, y.typ) } x.mode = invalid return @@ -977,7 +1005,7 @@ func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op if op == syntax.Div || op == syntax.Rem { // check for zero divisor if (x.mode == constant_ || isInteger(x.typ)) && y.mode == constant_ && constant.Sign(y.val) == 0 { - check.invalidOpf(&y, "division by zero") + check.error(&y, invalidOp+"division by zero") x.mode = invalid return } @@ -987,7 +1015,7 @@ func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op re, im := constant.Real(y.val), constant.Imag(y.val) re2, im2 := constant.BinaryOp(re, token.MUL, re), constant.BinaryOp(im, token.MUL, im) if constant.Sign(re2) == 0 && constant.Sign(im2) == 0 { - check.invalidOpf(&y, "division by zero") + check.error(&y, invalidOp+"division by zero") x.mode = invalid return } @@ -1016,104 +1044,6 @@ func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op // x.typ is unchanged } -// index checks an index expression for validity. -// If max >= 0, it is the upper bound for index. -// If the result typ is != Typ[Invalid], index is valid and typ is its (possibly named) integer type. -// If the result val >= 0, index is valid and val is its constant int value. 
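The shift checks above encode several distinct spec rules: the count must be an integer (or an untyped constant representable as one), must not be negative, must stay within shiftBound (1023 - 1 + 52, enough to express the smallest float64), and a signed count requires at least Go 1.13. At the source level, the commented-out lines are the ones these checks reject:

```go
package main

func main() {
	var x = 1
	var u uint = 3
	var s = 3

	_ = x << u   // always ok: unsigned shift count
	_ = x << s   // ok as of Go 1.13 (the version gate checked above)
	_ = x << 3.0 // ok: untyped constant 3.0 is representable as an integer

	// Rejected by the checks above:
	// _ = x << -1         // invalid operation: negative shift count
	// _ = x << 1.5        // invalid operation: shift count must be integer
	// const c = 1 << 1075 // constant shift count exceeds shiftBound
}
```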
-func (check *Checker) index(index syntax.Expr, max int64) (typ Type, val int64) { - typ = Typ[Invalid] - val = -1 - - var x operand - check.expr(&x, index) - if x.mode == invalid { - return - } - - // an untyped constant must be representable as Int - check.convertUntyped(&x, Typ[Int]) - if x.mode == invalid { - return - } - - // the index must be of integer type - if !isInteger(x.typ) { - check.invalidArgf(&x, "index %s must be integer", &x) - return - } - - if x.mode != constant_ { - return x.typ, -1 - } - - // a constant index i must be in bounds - if constant.Sign(x.val) < 0 { - check.invalidArgf(&x, "index %s must not be negative", &x) - return - } - - v, valid := constant.Int64Val(constant.ToInt(x.val)) - if !valid || max >= 0 && v >= max { - if check.conf.CompilerErrorMessages { - check.errorf(&x, "array index %s out of bounds [0:%d]", x.val.String(), max) - } else { - check.errorf(&x, "index %s is out of bounds", &x) - } - return - } - - // 0 <= v [ && v < max ] - return Typ[Int], v -} - -// indexElts checks the elements (elts) of an array or slice composite literal -// against the literal's element type (typ), and the element indices against -// the literal length if known (length >= 0). It returns the length of the -// literal (maximum index value + 1). -// -func (check *Checker) indexedElts(elts []syntax.Expr, typ Type, length int64) int64 { - visited := make(map[int64]bool, len(elts)) - var index, max int64 - for _, e := range elts { - // determine and check index - validIndex := false - eval := e - if kv, _ := e.(*syntax.KeyValueExpr); kv != nil { - if typ, i := check.index(kv.Key, length); typ != Typ[Invalid] { - if i >= 0 { - index = i - validIndex = true - } else { - check.errorf(e, "index %s must be integer constant", kv.Key) - } - } - eval = kv.Value - } else if length >= 0 && index >= length { - check.errorf(e, "index %d is out of bounds (>= %d)", index, length) - } else { - validIndex = true - } - - // if we have a valid index, check for duplicate entries - if validIndex { - if visited[index] { - check.errorf(e, "duplicate index %d in array or slice literal", index) - } - visited[index] = true - } - index++ - if index > max { - max = index - } - - // check element against composite literal element type - var x operand - check.exprWithHint(&x, eval, typ) - check.assignment(&x, typ, "array or slice literal") - } - return max -} - // exprKind describes the kind of an expression; the kind // determines if an expression is valid in 'statement context'. 
type exprKind int @@ -1139,31 +1069,7 @@ func (check *Checker) rawExpr(x *operand, e syntax.Expr, hint Type) exprKind { } kind := check.exprInternal(x, e, hint) - - // convert x into a user-friendly set of values - // TODO(gri) this code can be simplified - var typ Type - var val constant.Value - switch x.mode { - case invalid: - typ = Typ[Invalid] - case novalue: - typ = (*Tuple)(nil) - case constant_: - typ = x.typ - val = x.val - default: - typ = x.typ - } - assert(x.expr != nil && typ != nil) - - if isUntyped(typ) { - // delay type and value recording until we know the type - // or until the end of type checking - check.rememberUntyped(x.expr, false, x.mode, typ.(*Basic), val) - } else { - check.recordTypeAndValue(e, x.mode, typ, val) - } + check.record(x) return kind } @@ -1227,22 +1133,24 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin case *syntax.FuncLit: if sig, ok := check.typ(e.Type).(*Signature); ok { - // Anonymous functions are considered part of the - // init expression/func declaration which contains - // them: use existing package-level declaration info. - decl := check.decl // capture for use in closure below - iota := check.iota // capture for use in closure below (#22345) - // Don't type-check right away because the function may - // be part of a type definition to which the function - // body refers. Instead, type-check as soon as possible, - // but before the enclosing scope contents changes (#22992). - check.later(func() { - check.funcBody(decl, "", sig, e.Body, iota) - }) + if !check.conf.IgnoreFuncBodies && e.Body != nil { + // Anonymous functions are considered part of the + // init expression/func declaration which contains + // them: use existing package-level declaration info. + decl := check.decl // capture for use in closure below + iota := check.iota // capture for use in closure below (#22345) + // Don't type-check right away because the function may + // be part of a type definition to which the function + // body refers. Instead, type-check as soon as possible, + // but before the enclosing scope contents changes (#22992). 
+ check.later(func() { + check.funcBody(decl, "", sig, e.Body, iota) + }) + } x.mode = value x.typ = sig } else { - check.invalidASTf(e, "invalid function literal %s", e) + check.errorf(e, invalidAST+"invalid function literal %v", e) goto Error } @@ -1458,291 +1366,19 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin check.selector(x, e) case *syntax.IndexExpr: - check.exprOrType(x, e.X) + if check.indexExpr(x, e) { + check.funcInst(x, e) + } if x.mode == invalid { - check.use(e.Index) goto Error } - if x.mode == typexpr { - // type instantiation - x.mode = invalid - x.typ = check.varType(e) - if x.typ != Typ[Invalid] { - x.mode = typexpr - } - return expression - } - - if x.mode == value { - if sig := asSignature(x.typ); sig != nil && len(sig.tparams) > 0 { - // function instantiation - check.funcInst(x, e) - return expression - } - } - - // ordinary index expression - valid := false - length := int64(-1) // valid if >= 0 - switch typ := optype(x.typ).(type) { - case *Basic: - if isString(typ) { - valid = true - if x.mode == constant_ { - length = int64(len(constant.StringVal(x.val))) - } - // an indexed string always yields a byte value - // (not a constant) even if the string and the - // index are constant - x.mode = value - x.typ = universeByte // use 'byte' name - } - - case *Array: - valid = true - length = typ.len - if x.mode != variable { - x.mode = value - } - x.typ = typ.elem - - case *Pointer: - if typ := asArray(typ.base); typ != nil { - valid = true - length = typ.len - x.mode = variable - x.typ = typ.elem - } - - case *Slice: - valid = true - x.mode = variable - x.typ = typ.elem - - case *Map: - var key operand - check.expr(&key, e.Index) - check.assignment(&key, typ.key, "map index") - // ok to continue even if indexing failed - map element type is known - x.mode = mapindex - x.typ = typ.elem - x.expr = e - return expression - - case *Sum: - // A sum type can be indexed if all of the sum's types - // support indexing and have the same index and element - // type. Special rules apply for maps in the sum type. - var tkey, telem Type // key is for map types only - nmaps := 0 // number of map types in sum type - if typ.is(func(t Type) bool { - var e Type - switch t := under(t).(type) { - case *Basic: - if isString(t) { - e = universeByte - } - case *Array: - e = t.elem - case *Pointer: - if t := asArray(t.base); t != nil { - e = t.elem - } - case *Slice: - e = t.elem - case *Map: - // If there are multiple maps in the sum type, - // they must have identical key types. - // TODO(gri) We may be able to relax this rule - // but it becomes complicated very quickly. - if tkey != nil && !Identical(t.key, tkey) { - return false - } - tkey = t.key - e = t.elem - nmaps++ - case *TypeParam: - check.errorf(x, "type of %s contains a type parameter - cannot index (implementation restriction)", x) - case *instance: - panic("unimplemented") - } - if e == nil || telem != nil && !Identical(e, telem) { - return false - } - telem = e - return true - }) { - // If there are maps, the index expression must be assignable - // to the map key type (as for simple map index expressions). - if nmaps > 0 { - var key operand - check.expr(&key, e.Index) - check.assignment(&key, tkey, "map index") - // ok to continue even if indexing failed - map element type is known - - // If there are only maps, we are done. - if nmaps == len(typ.types) { - x.mode = mapindex - x.typ = telem - x.expr = e - return expression - } - - // Otherwise we have mix of maps and other types. 
For - // now we require that the map key be an integer type. - // TODO(gri) This is probably not good enough. - valid = isInteger(tkey) - // avoid 2nd indexing error if indexing failed above - if !valid && key.mode == invalid { - goto Error - } - x.mode = value // map index expressions are not addressable - } else { - // no maps - valid = true - x.mode = variable - } - x.typ = telem - } - } - - if !valid { - check.invalidOpf(x, "cannot index %s", x) - goto Error - } - - if e.Index == nil { - check.invalidASTf(e, "missing index for %s", x) - goto Error - } - - index := e.Index - if l, _ := index.(*syntax.ListExpr); l != nil { - if n := len(l.ElemList); n <= 1 { - check.invalidASTf(e, "invalid use of ListExpr for index expression %s with %d indices", e, n) - goto Error - } - // len(l.ElemList) > 1 - check.invalidOpf(l.ElemList[1], "more than one index") - index = l.ElemList[0] // continue with first index - } - - // In pathological (invalid) cases (e.g.: type T1 [][[]T1{}[0][0]]T0) - // the element type may be accessed before it's set. Make sure we have - // a valid type. - if x.typ == nil { - x.typ = Typ[Invalid] - } - - check.index(index, length) - // ok to continue - case *syntax.SliceExpr: - check.expr(x, e.X) + check.sliceExpr(x, e) if x.mode == invalid { - check.use(e.Index[:]...) goto Error } - valid := false - length := int64(-1) // valid if >= 0 - switch typ := optype(x.typ).(type) { - case *Basic: - if isString(typ) { - if e.Full { - check.invalidOpf(x, "3-index slice of string") - goto Error - } - valid = true - if x.mode == constant_ { - length = int64(len(constant.StringVal(x.val))) - } - // spec: "For untyped string operands the result - // is a non-constant value of type string." - if typ.kind == UntypedString { - x.typ = Typ[String] - } - } - - case *Array: - valid = true - length = typ.len - if x.mode != variable { - check.invalidOpf(x, "%s (slice of unaddressable value)", x) - goto Error - } - x.typ = &Slice{elem: typ.elem} - - case *Pointer: - if typ := asArray(typ.base); typ != nil { - valid = true - length = typ.len - x.typ = &Slice{elem: typ.elem} - } - - case *Slice: - valid = true - // x.typ doesn't change - - case *Sum, *TypeParam: - check.errorf(x, "generic slice expressions not yet implemented") - goto Error - } - - if !valid { - check.invalidOpf(x, "cannot slice %s", x) - goto Error - } - - x.mode = value - - // spec: "Only the first index may be omitted; it defaults to 0." - if e.Full && (e.Index[1] == nil || e.Index[2] == nil) { - check.invalidASTf(e, "2nd and 3rd index required in 3-index slice") - goto Error - } - - // check indices - var ind [3]int64 - for i, expr := range e.Index { - x := int64(-1) - switch { - case expr != nil: - // The "capacity" is only known statically for strings, arrays, - // and pointers to arrays, and it is the same as the length for - // those types. 
- max := int64(-1) - if length >= 0 { - max = length + 1 - } - if _, v := check.index(expr, max); v >= 0 { - x = v - } - case i == 0: - // default is 0 for the first index - x = 0 - case length >= 0: - // default is length (== capacity) otherwise - x = length - } - ind[i] = x - } - - // constant indices must be in range - // (check.index already checks that existing indices >= 0) - L: - for i, x := range ind[:len(ind)-1] { - if x > 0 { - for _, y := range ind[i+1:] { - if y >= 0 && x > y { - check.errorf(e, "invalid slice indices: %d > %d", x, y) - break L // only report one error, ok to continue - } - } - } - } - case *syntax.AssertExpr: check.expr(x, e.X) if x.mode == invalid { @@ -1756,24 +1392,29 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin check.ordinaryType(x.Pos(), xtyp) // x.(type) expressions are encoded via TypeSwitchGuards if e.Type == nil { - check.invalidASTf(e, "invalid use of AssertExpr") + check.error(e, invalidAST+"invalid use of AssertExpr") goto Error } T := check.varType(e.Type) if T == Typ[Invalid] { goto Error } - check.typeAssertion(posFor(x), x, xtyp, T, false) + check.typeAssertion(posFor(x), x, xtyp, T) x.mode = commaok x.typ = T case *syntax.TypeSwitchGuard: // x.(type) expressions are handled explicitly in type switches - check.invalidASTf(e, "use of .(type) outside type switch") + check.error(e, invalidAST+"use of .(type) outside type switch") goto Error case *syntax.CallExpr: - return check.call(x, e) + return check.callExpr(x, e) + + case *syntax.ListExpr: + // catch-all for unexpected expression lists + check.error(e, "unexpected list of expressions") + goto Error // case *syntax.UnaryExpr: // check.expr(x, e.X) @@ -1811,7 +1452,7 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin x.mode = variable x.typ = typ.base } else { - check.invalidOpf(x, "cannot indirect %s", x) + check.errorf(x, invalidOp+"cannot indirect %s", x) goto Error } } @@ -1837,7 +1478,7 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin case *syntax.KeyValueExpr: // key:value expressions are handled in composite literals - check.invalidASTf(e, "no key:value expected") + check.error(e, invalidAST+"no key:value expected") goto Error case *syntax.ArrayType, *syntax.SliceType, *syntax.StructType, *syntax.FuncType, @@ -1889,8 +1530,8 @@ func keyVal(x constant.Value) interface{} { } // typeAssertion checks that x.(T) is legal; xtyp must be the type of x. 
-func (check *Checker) typeAssertion(pos syntax.Pos, x *operand, xtyp *Interface, T Type, strict bool) { - method, wrongType := check.assertableTo(xtyp, T, strict) +func (check *Checker) typeAssertion(pos syntax.Pos, x *operand, xtyp *Interface, T Type) { + method, wrongType := check.assertableTo(xtyp, T) if method == nil { return } @@ -1927,12 +1568,6 @@ func (check *Checker) multiExpr(x *operand, e syntax.Expr) { check.exclude(x, 1< 0 { + // function instantiation + return true + } + } + + // ordinary index expression + valid := false + length := int64(-1) // valid if >= 0 + switch typ := optype(x.typ).(type) { + case *Basic: + if isString(typ) { + valid = true + if x.mode == constant_ { + length = int64(len(constant.StringVal(x.val))) + } + // an indexed string always yields a byte value + // (not a constant) even if the string and the + // index are constant + x.mode = value + x.typ = universeByte // use 'byte' name + } + + case *Array: + valid = true + length = typ.len + if x.mode != variable { + x.mode = value + } + x.typ = typ.elem + + case *Pointer: + if typ := asArray(typ.base); typ != nil { + valid = true + length = typ.len + x.mode = variable + x.typ = typ.elem + } + + case *Slice: + valid = true + x.mode = variable + x.typ = typ.elem + + case *Map: + index := check.singleIndex(e) + if index == nil { + x.mode = invalid + return + } + var key operand + check.expr(&key, index) + check.assignment(&key, typ.key, "map index") + // ok to continue even if indexing failed - map element type is known + x.mode = mapindex + x.typ = typ.elem + x.expr = e + return + + case *Sum: + // A sum type can be indexed if all of the sum's types + // support indexing and have the same index and element + // type. Special rules apply for maps in the sum type. + var tkey, telem Type // key is for map types only + nmaps := 0 // number of map types in sum type + if typ.is(func(t Type) bool { + var e Type + switch t := under(t).(type) { + case *Basic: + if isString(t) { + e = universeByte + } + case *Array: + e = t.elem + case *Pointer: + if t := asArray(t.base); t != nil { + e = t.elem + } + case *Slice: + e = t.elem + case *Map: + // If there are multiple maps in the sum type, + // they must have identical key types. + // TODO(gri) We may be able to relax this rule + // but it becomes complicated very quickly. + if tkey != nil && !Identical(t.key, tkey) { + return false + } + tkey = t.key + e = t.elem + nmaps++ + case *TypeParam: + check.errorf(x, "type of %s contains a type parameter - cannot index (implementation restriction)", x) + case *instance: + panic("unimplemented") + } + if e == nil || telem != nil && !Identical(e, telem) { + return false + } + telem = e + return true + }) { + // If there are maps, the index expression must be assignable + // to the map key type (as for simple map index expressions). + if nmaps > 0 { + index := check.singleIndex(e) + if index == nil { + x.mode = invalid + return + } + var key operand + check.expr(&key, index) + check.assignment(&key, tkey, "map index") + // ok to continue even if indexing failed - map element type is known + + // If there are only maps, we are done. + if nmaps == len(typ.types) { + x.mode = mapindex + x.typ = telem + x.expr = e + return + } + + // Otherwise we have mix of maps and other types. For + // now we require that the map key be an integer type. + // TODO(gri) This is probably not good enough. 
+ valid = isInteger(tkey) + // avoid 2nd indexing error if indexing failed above + if !valid && key.mode == invalid { + x.mode = invalid + return + } + x.mode = value // map index expressions are not addressable + } else { + // no maps + valid = true + x.mode = variable + } + x.typ = telem + } + } + + if !valid { + check.errorf(x, invalidOp+"cannot index %s", x) + x.mode = invalid + return + } + + index := check.singleIndex(e) + if index == nil { + x.mode = invalid + return + } + + // In pathological (invalid) cases (e.g.: type T1 [][[]T1{}[0][0]]T0) + // the element type may be accessed before it's set. Make sure we have + // a valid type. + if x.typ == nil { + x.typ = Typ[Invalid] + } + + check.index(index, length) + return false +} + +func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { + check.expr(x, e.X) + if x.mode == invalid { + check.use(e.Index[:]...) + return + } + + valid := false + length := int64(-1) // valid if >= 0 + switch typ := optype(x.typ).(type) { + case *Basic: + if isString(typ) { + if e.Full { + check.error(x, invalidOp+"3-index slice of string") + x.mode = invalid + return + } + valid = true + if x.mode == constant_ { + length = int64(len(constant.StringVal(x.val))) + } + // spec: "For untyped string operands the result + // is a non-constant value of type string." + if typ.kind == UntypedString { + x.typ = Typ[String] + } + } + + case *Array: + valid = true + length = typ.len + if x.mode != variable { + check.errorf(x, invalidOp+"%s (slice of unaddressable value)", x) + x.mode = invalid + return + } + x.typ = &Slice{elem: typ.elem} + + case *Pointer: + if typ := asArray(typ.base); typ != nil { + valid = true + length = typ.len + x.typ = &Slice{elem: typ.elem} + } + + case *Slice: + valid = true + // x.typ doesn't change + + case *Sum, *TypeParam: + check.error(x, "generic slice expressions not yet implemented") + x.mode = invalid + return + } + + if !valid { + check.errorf(x, invalidOp+"cannot slice %s", x) + x.mode = invalid + return + } + + x.mode = value + + // spec: "Only the first index may be omitted; it defaults to 0." + if e.Full && (e.Index[1] == nil || e.Index[2] == nil) { + check.error(e, invalidAST+"2nd and 3rd index required in 3-index slice") + x.mode = invalid + return + } + + // check indices + var ind [3]int64 + for i, expr := range e.Index { + x := int64(-1) + switch { + case expr != nil: + // The "capacity" is only known statically for strings, arrays, + // and pointers to arrays, and it is the same as the length for + // those types. + max := int64(-1) + if length >= 0 { + max = length + 1 + } + if _, v := check.index(expr, max); v >= 0 { + x = v + } + case i == 0: + // default is 0 for the first index + x = 0 + case length >= 0: + // default is length (== capacity) otherwise + x = length + } + ind[i] = x + } + + // constant indices must be in range + // (check.index already checks that existing indices >= 0) +L: + for i, x := range ind[:len(ind)-1] { + if x > 0 { + for _, y := range ind[i+1:] { + if y >= 0 && x > y { + check.errorf(e, "invalid slice indices: %d > %d", x, y) + break L // only report one error, ok to continue + } + } + } + } +} + +// singleIndex returns the (single) index from the index expression e. +// If the index is missing, or if there are multiple indices, an error +// is reported and the result is nil. 
+func (check *Checker) singleIndex(e *syntax.IndexExpr) syntax.Expr { + index := e.Index + if index == nil { + check.errorf(e, invalidAST+"missing index for %s", e.X) + return nil + } + if l, _ := index.(*syntax.ListExpr); l != nil { + if n := len(l.ElemList); n <= 1 { + check.errorf(e, invalidAST+"invalid use of ListExpr for index expression %v with %d indices", e, n) + return nil + } + // len(l.ElemList) > 1 + check.error(l.ElemList[1], invalidOp+"more than one index") + index = l.ElemList[0] // continue with first index + } + return index +} + +// index checks an index expression for validity. +// If max >= 0, it is the upper bound for index. +// If the result typ is != Typ[Invalid], index is valid and typ is its (possibly named) integer type. +// If the result val >= 0, index is valid and val is its constant int value. +func (check *Checker) index(index syntax.Expr, max int64) (typ Type, val int64) { + typ = Typ[Invalid] + val = -1 + + var x operand + check.expr(&x, index) + if !check.isValidIndex(&x, "index", false) { + return + } + + if x.mode != constant_ { + return x.typ, -1 + } + + if x.val.Kind() == constant.Unknown { + return + } + + v, ok := constant.Int64Val(x.val) + assert(ok) + if max >= 0 && v >= max { + if check.conf.CompilerErrorMessages { + check.errorf(&x, invalidArg+"array index %s out of bounds [0:%d]", x.val.String(), max) + } else { + check.errorf(&x, invalidArg+"index %s is out of bounds", &x) + } + return + } + + // 0 <= v [ && v < max ] + return x.typ, v +} + +// isValidIndex checks whether operand x satisfies the criteria for integer +// index values. If allowNegative is set, a constant operand may be negative. +// If the operand is not valid, an error is reported (using what as context) +// and the result is false. +func (check *Checker) isValidIndex(x *operand, what string, allowNegative bool) bool { + if x.mode == invalid { + return false + } + + // spec: "a constant index that is untyped is given type int" + check.convertUntyped(x, Typ[Int]) + if x.mode == invalid { + return false + } + + // spec: "the index x must be of integer type or an untyped constant" + if !isInteger(x.typ) { + check.errorf(x, invalidArg+"%s %s must be integer", what, x) + return false + } + + if x.mode == constant_ { + // spec: "a constant index must be non-negative ..." + if !allowNegative && constant.Sign(x.val) < 0 { + check.errorf(x, invalidArg+"%s %s must not be negative", what, x) + return false + } + + // spec: "... and representable by a value of type int" + if !representableConst(x.val, check, Typ[Int], &x.val) { + check.errorf(x, invalidArg+"%s %s overflows int", what, x) + return false + } + } + + return true +} + +// indexElts checks the elements (elts) of an array or slice composite literal +// against the literal's element type (typ), and the element indices against +// the literal length if known (length >= 0). It returns the length of the +// literal (maximum index value + 1). 
+func (check *Checker) indexedElts(elts []syntax.Expr, typ Type, length int64) int64 { + visited := make(map[int64]bool, len(elts)) + var index, max int64 + for _, e := range elts { + // determine and check index + validIndex := false + eval := e + if kv, _ := e.(*syntax.KeyValueExpr); kv != nil { + if typ, i := check.index(kv.Key, length); typ != Typ[Invalid] { + if i >= 0 { + index = i + validIndex = true + } else { + check.errorf(e, "index %s must be integer constant", kv.Key) + } + } + eval = kv.Value + } else if length >= 0 && index >= length { + check.errorf(e, "index %d is out of bounds (>= %d)", index, length) + } else { + validIndex = true + } + + // if we have a valid index, check for duplicate entries + if validIndex { + if visited[index] { + check.errorf(e, "duplicate index %d in array or slice literal", index) + } + visited[index] = true + } + index++ + if index > max { + max = index + } + + // check element against composite literal element type + var x operand + check.exprWithHint(&x, eval, typ) + check.assignment(&x, typ, "array or slice literal") + } + return max +} diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go index 061b919239f..f37d7f6477e 100644 --- a/src/cmd/compile/internal/types2/infer.go +++ b/src/cmd/compile/internal/types2/infer.go @@ -2,29 +2,115 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file implements type parameter inference given -// a list of concrete arguments and a parameter list. +// This file implements type parameter inference. package types2 -import "bytes" +import ( + "bytes" + "cmd/compile/internal/syntax" +) -// infer returns the list of actual type arguments for the given list of type parameters tparams -// by inferring them from the actual arguments args for the parameters params. If type inference -// is impossible because unification fails, an error is reported and the resulting types list is -// nil, and index is 0. Otherwise, types is the list of inferred type arguments, and index is -// the index of the first type argument in that list that couldn't be inferred (and thus is nil). -// If all type arguments were inferred successfully, index is < 0. -func (check *Checker) infer(tparams []*TypeName, params *Tuple, args []*operand) (types []Type, index int) { +const useConstraintTypeInference = true + +// infer attempts to infer the complete set of type arguments for generic function instantiation/call +// based on the given type parameters tparams, type arguments targs, function parameters params, and +// function arguments args, if any. There must be at least one type parameter, no more type arguments +// than type parameters, and params and args must match in number (incl. zero). +// If successful, infer returns the complete list of type arguments, one for each type parameter. +// Otherwise the result is nil and appropriate errors will be reported unless report is set to false. +// +// Inference proceeds in 3 steps: +// +// 1) Start with given type arguments. +// 2) Infer type arguments from typed function arguments. +// 3) Infer type arguments from untyped function arguments. +// +// Constraint type inference is used after each step to expand the set of type arguments. 
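The three inference steps documented above can be observed with the released generics syntax (Go 1.18; this CL predates the release, so the example is illustrative rather than tied to the prototype syntax the CL compiles):

```go
package main

import "fmt"

func first[T any](xs ...T) T { return xs[0] }

func main() {
	var f float32 = 2.5
	fmt.Println(first(f, 1))    // step 2: T = float32 from the typed argument; untyped 1 follows
	fmt.Println(first(1, 2))    // step 3: no typed argument, so T = int, the default type of 1
	fmt.Println(first[int8](1)) // step 0/1: explicit type argument wins; nothing to infer
}
```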
+// +func (check *Checker) infer(pos syntax.Pos, tparams []*TypeName, targs []Type, params *Tuple, args []*operand, report bool) (result []Type) { + if debug { + defer func() { + assert(result == nil || len(result) == len(tparams)) + for _, targ := range result { + assert(targ != nil) + } + //check.dump("### inferred targs = %s", result) + }() + } + + // There must be at least one type parameter, and no more type arguments than type parameters. + n := len(tparams) + assert(n > 0 && len(targs) <= n) + + // Function parameters and arguments must match in number. assert(params.Len() == len(args)) + // --- 0 --- + // If we already have all type arguments, we're done. + if len(targs) == n { + return targs + } + // len(targs) < n + + // --- 1 --- + // Explicitly provided type arguments take precedence over any inferred types; + // and types inferred via constraint type inference take precedence over types + // inferred from function arguments. + // If we have type arguments, see how far we get with constraint type inference. + if len(targs) > 0 && useConstraintTypeInference { + var index int + targs, index = check.inferB(tparams, targs, report) + if targs == nil || index < 0 { + return targs + } + } + + // Continue with the type arguments we have now. Avoid matching generic + // parameters that already have type arguments against function arguments: + // It may fail because matching uses type identity while parameter passing + // uses assignment rules. Instantiate the parameter list with the type + // arguments we have, and continue with that parameter list. + + // First, make sure we have a "full" list of type arguments, some of which + // may be nil (unknown). + if len(targs) < n { + targs2 := make([]Type, n) + copy(targs2, targs) + targs = targs2 + } + // len(targs) == n + + // Substitute type arguments for their respective type parameters in params, + // if any. Note that nil targs entries are ignored by check.subst. + // TODO(gri) Can we avoid this (we're setting known type arguments below, + // but that doesn't impact the isParameterized check for now). + if params.Len() > 0 { + smap := makeSubstMap(tparams, targs) + params = check.subst(nopos, params, smap).(*Tuple) + } + + // --- 2 --- + // Unify parameter and argument types for generic parameters with typed arguments + // and collect the indices of generic parameters with untyped arguments. + // Terminology: generic parameter = function parameter with a type-parameterized type u := newUnifier(check, false) u.x.init(tparams) + // Set the type arguments which we know already. + for i, targ := range targs { + if targ != nil { + u.x.set(i, targ) + } + } + errorf := func(kind string, tpar, targ Type, arg *operand) { + if !report { + return + } // provide a better error message if we can - targs, failed := u.x.types() - if failed == 0 { + targs, index := u.x.types() + if index == 0 { // The first type parameter couldn't be inferred. // If none of them could be inferred, don't try // to provide the inferred type in the error msg. @@ -49,16 +135,13 @@ func (check *Checker) infer(tparams []*TypeName, params *Tuple, args []*operand) } } - // Terminology: generic parameter = function parameter with a type-parameterized type - - // 1st pass: Unify parameter and argument types for generic parameters with typed arguments - // and collect the indices of generic parameters with untyped arguments.
+ // indices of the generic parameters with untyped arguments - save for later var indices []int for i, arg := range args { par := params.At(i) // If we permit bidirectional unification, this conditional code needs to be // executed even if par.typ is not parameterized since the argument may be a - // generic function (for which we want to infer // its type arguments). + // generic function (for which we want to infer its type arguments). if isParameterized(tparams, par.typ) { if arg.mode == invalid { // An error was reported earlier. Ignore this targ @@ -72,7 +155,7 @@ func (check *Checker) infer(tparams []*TypeName, params *Tuple, args []*operand) // the respective type parameters of targ. if !u.unify(par.typ, targ) { errorf("type", par.typ, targ, arg) - return nil, 0 + return nil } } else { indices = append(indices, i) @@ -80,42 +163,68 @@ func (check *Checker) infer(tparams []*TypeName, params *Tuple, args []*operand) } } - // Some generic parameters with untyped arguments may have been given a type - // indirectly through another generic parameter with a typed argument; we can - // ignore those now. (This only means that we know the types for those generic - // parameters; it doesn't mean untyped arguments can be passed safely. We still - // need to verify that assignment of those arguments is valid when we check - // function parameter passing external to infer.) - j := 0 + // If we've got all type arguments, we're done. + var index int + targs, index = u.x.types() + if index < 0 { + return targs + } + + // See how far we get with constraint type inference. + // Note that even if we don't have any type arguments, constraint type inference + // may produce results for constraints that explicitly specify a type. + if useConstraintTypeInference { + targs, index = check.inferB(tparams, targs, report) + if targs == nil || index < 0 { + return targs + } + } + + // --- 3 --- + // Use any untyped arguments to infer additional type arguments. + // Some generic parameters with untyped arguments may have been given + // a type by now, we can ignore them. for _, i := range indices { par := params.At(i) // Since untyped types are all basic (i.e., non-composite) types, an // untyped argument will never match a composite parameter type; the // only parameter type it can possibly match against is a *TypeParam. - // Thus, only keep the indices of generic parameters that are not of - // composite types and which don't have a type inferred yet. - if tpar, _ := par.typ.(*TypeParam); tpar != nil && u.x.at(tpar.index) == nil { - indices[j] = i - j++ - } - } - indices = indices[:j] - - // 2nd pass: Unify parameter and default argument types for remaining generic parameters. - for _, i := range indices { - par := params.At(i) - arg := args[i] - targ := Default(arg.typ) - // The default type for an untyped nil is untyped nil. We must not - // infer an untyped nil type as type parameter type. Ignore untyped - // nil by making sure all default argument types are typed. - if isTyped(targ) && !u.unify(par.typ, targ) { - errorf("default type", par.typ, targ, arg) - return nil, 0 + // Thus, only consider untyped arguments for generic parameters that + // are not of composite types and which don't have a type inferred yet. + if tpar, _ := par.typ.(*TypeParam); tpar != nil && targs[tpar.index] == nil { + arg := args[i] + targ := Default(arg.typ) + // The default type for an untyped nil is untyped nil. We must not + // infer an untyped nil type as type parameter type. 
Ignore untyped + // nil by making sure all default argument types are typed. + if isTyped(targ) && !u.unify(par.typ, targ) { + errorf("default type", par.typ, targ, arg) + return nil + } } } - return u.x.types() + // If we've got all type arguments, we're done. + targs, index = u.x.types() + if index < 0 { + return targs + } + + // Again, follow up with constraint type inference. + if useConstraintTypeInference { + targs, index = check.inferB(tparams, targs, report) + if targs == nil || index < 0 { + return targs + } + } + + // At least one type argument couldn't be inferred. + assert(targs != nil && index >= 0 && targs[index] == nil) + tpar := tparams[index] + if report { + check.errorf(pos, "cannot infer %s (%s) (%s)", tpar.name, tpar.pos, targs) + } + return nil } // typeNamesString produces a string containing all the @@ -265,12 +374,13 @@ func (w *tpWalker) isParameterizedList(list []Type) bool { // inferB returns the list of actual type arguments inferred from the type parameters' // bounds and an initial set of type arguments. If type inference is impossible because -// unification fails, an error is reported, the resulting types list is nil, and index is 0. +// unification fails, an error is reported if report is set to true, the resulting types +// list is nil, and index is 0. // Otherwise, types is the list of inferred type arguments, and index is the index of the // first type argument in that list that couldn't be inferred (and thus is nil). If all -// type arguments where inferred successfully, index is < 0. The number of type arguments +// type arguments were inferred successfully, index is < 0. The number of type arguments // provided may be less than the number of type parameters, but there must be at least one. -func (check *Checker) inferB(tparams []*TypeName, targs []Type) (types []Type, index int) { +func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (types []Type, index int) { assert(len(tparams) >= len(targs) && len(targs) > 0) // Setup bidirectional unification between those structural bounds @@ -292,7 +402,9 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type) (types []Type, i sbound := check.structuralType(typ.bound) if sbound != nil { if !u.unify(typ, sbound) { - check.errorf(tpar.pos, "%s does not match %s", tpar, sbound) + if report { + check.errorf(tpar, "%s does not match %s", tpar, sbound) + } return nil, 0 } } @@ -305,7 +417,7 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type) (types []Type, i // was given, unification produced the type list [int, []C, *A]. We eliminate the // remaining type parameters by substituting the type parameters in this type list // until nothing changes anymore. - types, index = u.x.types() + types, _ = u.x.types() if debug { for i, targ := range targs { assert(targ == nil || types[i] == targ) @@ -339,6 +451,25 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type) (types []Type, i dirty = dirty[:n] } + // Once nothing changes anymore, we may still have type parameters left; + // e.g., a structural constraint *P may match a type parameter Q but we + // don't have any type arguments to fill in for *P or Q (issue #45548). + // Don't let such inferences escape, instead nil them out. 
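Constraint type inference, the inferB pass invoked after each step above, fills in type arguments from structural constraints rather than from function arguments. With released generics syntax (again Go 1.18, so a sketch rather than this CL's exact surface), the situation it handles looks like this:

```go
package main

import "fmt"

// S's constraint has structural type []E, so once S is known from the
// argument, E is determined by unifying S's underlying type with []E;
// no function argument mentions E directly.
func double[S ~[]E, E ~int](s S) S {
	r := make(S, len(s))
	for i, v := range s {
		r[i] = 2 * v
	}
	return r
}

type MySlice []int

func main() {
	fmt.Println(double(MySlice{1, 2, 3})) // S = MySlice, E = int, both inferred
}
```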
+ for i, typ := range types { + if typ != nil && isParameterized(tparams, typ) { + types[i] = nil + } + } + + // update index + index = -1 + for i, typ := range types { + if typ == nil { + index = i + break + } + } + return } diff --git a/src/cmd/compile/internal/types2/initorder.go b/src/cmd/compile/internal/types2/initorder.go index a9cabecdf27..40816276665 100644 --- a/src/cmd/compile/internal/types2/initorder.go +++ b/src/cmd/compile/internal/types2/initorder.go @@ -151,18 +151,20 @@ func findPath(objMap map[Object]*declInfo, from, to Object, seen map[Object]bool // reportCycle reports an error for the given cycle. func (check *Checker) reportCycle(cycle []Object) { obj := cycle[0] + var err error_ if check.conf.CompilerErrorMessages { - check.errorf(obj, "initialization loop for %s", obj.Name()) + err.errorf(obj, "initialization loop for %s", obj.Name()) } else { - check.errorf(obj, "initialization cycle for %s", obj.Name()) + err.errorf(obj, "initialization cycle for %s", obj.Name()) } // subtle loop: print cycle[i] for i = 0, n-1, n-2, ... 1 for len(cycle) = n for i := len(cycle) - 1; i >= 0; i-- { - check.errorf(obj, "\t%s refers to", obj.Name()) // secondary error, \t indented + err.errorf(obj, "%s refers to", obj.Name()) obj = cycle[i] } // print cycle[0] again to close the cycle - check.errorf(obj, "\t%s", obj.Name()) + err.errorf(obj, "%s", obj.Name()) + check.report(&err) } // ---------------------------------------------------------------------------- diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go new file mode 100644 index 00000000000..0df52e851c9 --- /dev/null +++ b/src/cmd/compile/internal/types2/instantiate.go @@ -0,0 +1,63 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" +) + +// Instantiate instantiates the type typ with the given type arguments. +// typ must be a *Named or a *Signature type, it must be generic, and +// its number of type parameters must match the number of provided type +// arguments. The result is a new, instantiated (not generic) type of +// the same kind (either a *Named or a *Signature). The type arguments +// are not checked against the constraints of the type parameters. +// Any methods attached to a *Named are simply copied; they are not +// instantiated. +func Instantiate(pos syntax.Pos, typ Type, targs []Type) (res Type) { + // TODO(gri) This code is basically identical to the prolog + // in Checker.instantiate. Factor. + var tparams []*TypeName + switch t := typ.(type) { + case *Named: + tparams = t.tparams + case *Signature: + tparams = t.tparams + defer func() { + // If we had an unexpected failure somewhere don't panic below when + // asserting res.(*Signature). Check for *Signature in case Typ[Invalid] + // is returned. + if _, ok := res.(*Signature); !ok { + return + } + // If the signature doesn't use its type parameters, subst + // will not make a copy. In that case, make a copy now (so + // we can set tparams to nil w/o causing side-effects). + if t == res { + copy := *t + res = © + } + // After instantiating a generic signature, it is not generic + // anymore; we need to set tparams to nil. 
+ res.(*Signature).tparams = nil + }() + + default: + panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ)) + } + + // the number of supplied types must match the number of type parameters + if len(targs) != len(tparams) { + panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, len(targs), len(tparams))) + } + + if len(tparams) == 0 { + return typ // nothing to do (minor optimization) + } + + smap := makeSubstMap(tparams, targs) + return (*Checker)(nil).subst(pos, typ, smap) +} diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go index 5a32fa590ac..e716a480385 100644 --- a/src/cmd/compile/internal/types2/issues_test.go +++ b/src/cmd/compile/internal/types2/issues_test.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -29,7 +28,6 @@ func mustParse(t *testing.T, src string) *syntax.File { func TestIssue5770(t *testing.T) { f := mustParse(t, `package p; type S struct{T}`) var conf Config - // conf := Config{Importer: importer.Default()} _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // do not crash want := "undeclared name: T" if err == nil || !strings.Contains(err.Error(), want) { @@ -76,7 +74,7 @@ var ( } case *syntax.Name: if x.Value == "nil" { - want = NewInterfaceType(nil, nil) // interface{} + want = NewInterfaceType(nil, nil) // interface{} (for now, go/types types this as "untyped nil") } } if want != nil && !Identical(tv.Type, want) { @@ -197,7 +195,7 @@ L7 uses var z int` } } -// This tests that the package associated with the types.Object.Pkg method +// This tests that the package associated with the types2.Object.Pkg method // is the type's package independent of the order in which the imports are // listed in the sources src1, src2 below. // The actual issue is in go/internal/gcimporter which has a corresponding @@ -300,8 +298,6 @@ func TestIssue22525(t *testing.T) { } func TestIssue25627(t *testing.T) { - t.Skip("requires syntax tree inspection") - const prefix = `package p; import "unsafe"; type P *struct{}; type I interface{}; type T ` // The src strings (without prefix) are constructed such that the number of semicolons // plus one corresponds to the number of fields expected in the respective struct. @@ -325,20 +321,17 @@ func TestIssue25627(t *testing.T) { } } - unimplemented() - /* - ast.Inspect(f, func(n syntax.Node) bool { - if spec, _ := n.(*syntax.TypeDecl); spec != nil { - if tv, ok := info.Types[spec.Type]; ok && spec.Name.Value == "T" { - want := strings.Count(src, ";") + 1 - if got := tv.Type.(*Struct).NumFields(); got != want { - t.Errorf("%s: got %d fields; want %d", src, got, want) - } + syntax.Walk(f, func(n syntax.Node) bool { + if decl, _ := n.(*syntax.TypeDecl); decl != nil { + if tv, ok := info.Types[decl.Type]; ok && decl.Name.Value == "T" { + want := strings.Count(src, ";") + 1 + if got := tv.Type.(*Struct).NumFields(); got != want { + t.Errorf("%s: got %d fields; want %d", src, got, want) } } - return true - }) - */ + } + return false + }) } } @@ -392,9 +385,6 @@ func TestIssue28005(t *testing.T) { t.Fatal("object X not found") } iface := obj.Type().Underlying().(*Interface) // object X must be an interface - if iface == nil { - t.Fatalf("%s is not an interface", obj) - } // Each iface method m is embedded; and m's receiver base type name // must match the method's name per the choice in the source file. 
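The new Instantiate entry point later surfaced, with a different signature, as an exported go/types API in Go 1.18. The behavior documented in instantiate.go above (a fresh, non-generic type; arguments not checked against constraints when validation is off) can be tried through that released function; note this is the go/types API, not the types2 one in this diff:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p; type List[T any] []T`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	list := pkg.Scope().Lookup("List").Type()
	// validate=false: type arguments are not checked against the
	// constraints, matching the behavior documented in the diff.
	inst, err := types.Instantiate(nil, list, []types.Type{types.Typ[types.Int]}, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(inst) // p.List[int]
}
```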
@@ -484,7 +474,7 @@ func TestIssue34151(t *testing.T) { } bast := mustParse(t, bsrc) - conf := Config{Importer: importHelper{a}} + conf := Config{Importer: importHelper{pkg: a}} b, err := conf.Check(bast.PkgName.Value, []*syntax.File{bast}, nil) if err != nil { t.Errorf("package %s failed to typecheck: %v", b.Name(), err) @@ -492,14 +482,18 @@ func TestIssue34151(t *testing.T) { } type importHelper struct { - pkg *Package + pkg *Package + fallback Importer } func (h importHelper) Import(path string) (*Package, error) { - if path != h.pkg.Path() { + if path == h.pkg.Path() { + return h.pkg, nil + } + if h.fallback == nil { return nil, fmt.Errorf("got package path %q; want %q", path, h.pkg.Path()) } - return h.pkg, nil + return h.fallback.Import(path) } // TestIssue34921 verifies that we don't update an imported type's underlying @@ -523,7 +517,7 @@ func TestIssue34921(t *testing.T) { var pkg *Package for _, src := range sources { f := mustParse(t, src) - conf := Config{Importer: importHelper{pkg}} + conf := Config{Importer: importHelper{pkg: pkg}} res, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) if err != nil { t.Errorf("%q failed to typecheck: %v", src, err) @@ -534,25 +528,85 @@ func TestIssue34921(t *testing.T) { func TestIssue43088(t *testing.T) { // type T1 struct { - // x T2 + // _ T2 // } // // type T2 struct { - // x struct { - // x T2 + // _ struct { + // _ T2 // } // } n1 := NewTypeName(syntax.Pos{}, nil, "T1", nil) T1 := NewNamed(n1, nil, nil) n2 := NewTypeName(syntax.Pos{}, nil, "T2", nil) T2 := NewNamed(n2, nil, nil) - s1 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "x", T2, false)}, nil) + s1 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "_", T2, false)}, nil) T1.SetUnderlying(s1) - s2 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "x", T2, false)}, nil) - s3 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "x", s2, false)}, nil) + s2 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "_", T2, false)}, nil) + s3 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "_", s2, false)}, nil) T2.SetUnderlying(s3) // These calls must terminate (no endless recursion). Comparable(T1) Comparable(T2) } + +func TestIssue44515(t *testing.T) { + typ := Unsafe.Scope().Lookup("Pointer").Type() + + got := TypeString(typ, nil) + want := "unsafe.Pointer" + if got != want { + t.Errorf("got %q; want %q", got, want) + } + + qf := func(pkg *Package) string { + if pkg == Unsafe { + return "foo" + } + return "" + } + got = TypeString(typ, qf) + want = "foo.Pointer" + if got != want { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestIssue43124(t *testing.T) { + // All involved packages have the same name (template). Error messages should + // disambiguate between text/template and html/template by printing the full + // path. + const ( + asrc = `package a; import "text/template"; func F(template.Template) {}; func G(int) {}` + bsrc = `package b; import ("a"; "html/template"); func _() { a.F(template.Template{}) }` + csrc = `package c; import ("a"; "html/template"); func _() { a.G(template.Template{}) }` + ) + + a, err := pkgFor("a", asrc, nil) + if err != nil { + t.Fatalf("package a failed to typecheck: %v", err) + } + conf := Config{Importer: importHelper{pkg: a, fallback: defaultImporter()}} + + // Packages should be fully qualified when there is ambiguity within the + // error string itself. 
+ bast := mustParse(t, bsrc) + _, err = conf.Check(bast.PkgName.Value, []*syntax.File{bast}, nil) + if err == nil { + t.Fatal("package b had no errors") + } + if !strings.Contains(err.Error(), "text/template") || !strings.Contains(err.Error(), "html/template") { + t.Errorf("type checking error for b does not disambiguate package template: %q", err) + } + + // ...and also when there is any ambiguity in reachable packages. + cast := mustParse(t, csrc) + _, err = conf.Check(cast.PkgName.Value, []*syntax.File{cast}, nil) + if err == nil { + t.Fatal("package c had no errors") + } + if !strings.Contains(err.Error(), "html/template") { + t.Errorf("type checking error for c does not disambiguate package template: %q", err) + } +} diff --git a/src/cmd/compile/internal/types2/labels.go b/src/cmd/compile/internal/types2/labels.go index b20b454dea2..d3206988b54 100644 --- a/src/cmd/compile/internal/types2/labels.go +++ b/src/cmd/compile/internal/types2/labels.go @@ -11,7 +11,7 @@ import ( // labels checks correct label use in body. func (check *Checker) labels(body *syntax.BlockStmt) { // set of all labels in this body - all := NewScope(nil, body.Pos(), endPos(body), "label") + all := NewScope(nil, body.Pos(), syntax.EndPos(body), "label") fwdJumps := check.blockBranches(all, nil, nil, body.List) @@ -128,8 +128,11 @@ func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.Lab if name := s.Label.Value; name != "_" { lbl := NewLabel(s.Label.Pos(), check.pkg, name) if alt := all.Insert(lbl); alt != nil { - check.softErrorf(lbl.pos, "label %s already declared", name) - check.reportAltDecl(alt) + var err error_ + err.soft = true + err.errorf(lbl.pos, "label %s already declared", name) + err.recordAltDecl(alt) + check.report(&err) // ok to continue } else { b.insert(s) @@ -209,7 +212,7 @@ func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.Lab } default: - check.invalidASTf(s, "branch statement: %s %s", s.Tok, name) + check.errorf(s, invalidAST+"branch statement: %s %s", s.Tok, name) return } diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go index 34d18acdfce..78299502e9c 100644 --- a/src/cmd/compile/internal/types2/lookup.go +++ b/src/cmd/compile/internal/types2/lookup.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -141,7 +140,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack // continue with underlying type, but only if it's not a type parameter // TODO(gri) is this what we want to do for type parameters? (spec question) - typ = under(named) + typ = named.under() if asTypeParam(typ) != nil { continue } @@ -207,8 +206,8 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack } if obj == nil { // At this point we're not (yet) looking into methods - // that any underlyng type of the types in the type list - // migth have. + // that any underlying type of the types in the type list + // might have. // TODO(gri) Do we want to specify the language that way? } } @@ -428,13 +427,13 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method, // method required by V and whether it is missing or just has the wrong type. // The receiver may be nil if assertableTo is invoked through an exported API call // (such as AssertableTo), i.e., when all methods have been type-checked. 
-// If strict (or the global constant forceStrict) is set, assertions that -// are known to fail are not permitted. -func (check *Checker) assertableTo(V *Interface, T Type, strict bool) (method, wrongType *Func) { +// If the global constant forceStrict is set, assertions that are known to fail +// are not permitted. +func (check *Checker) assertableTo(V *Interface, T Type) (method, wrongType *Func) { // no static check is required if T is an interface // spec: "If T is an interface type, x.(T) asserts that the // dynamic type of x implements the interface T." - if asInterface(T) != nil && !(strict || forceStrict) { + if asInterface(T) != nil && !forceStrict { return } return check.missingMethod(T, V, false) diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go index 001e905a7b5..455d8b5dd1d 100644 --- a/src/cmd/compile/internal/types2/operand.go +++ b/src/cmd/compile/internal/types2/operand.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -236,65 +235,45 @@ func (x *operand) setConst(k syntax.LitKind, lit string) { // isNil reports whether x is a typed or the untyped nil value. func (x *operand) isNil() bool { return x.mode == nilvalue } -// TODO(gri) The functions operand.assignableTo, checker.convertUntyped, -// checker.representable, and checker.assignment are -// overlapping in functionality. Need to simplify and clean up. - -// assignableTo reports whether x is assignable to a variable of type T. -// If the result is false and a non-nil reason is provided, it may be set -// to a more detailed explanation of the failure (result != ""). -// The check parameter may be nil if assignableTo is invoked through -// an exported API call, i.e., when all methods have been type-checked. -func (x *operand) assignableTo(check *Checker, T Type, reason *string) bool { +// assignableTo reports whether x is assignable to a variable of type T. If the +// result is false and a non-nil reason is provided, it may be set to a more +// detailed explanation of the failure (result != ""). The returned error code +// is only valid if the (first) result is false. The check parameter may be nil +// if assignableTo is invoked through an exported API call, i.e., when all +// methods have been type-checked. +func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, errorCode) { if x.mode == invalid || T == Typ[Invalid] { - return true // avoid spurious errors + return true, 0 // avoid spurious errors } V := x.typ // x's type is identical to T if check.identical(V, T) { - return true + return true, 0 } Vu := optype(V) Tu := optype(T) - // x is an untyped value representable by a value of type T - // TODO(gri) This is borrowing from checker.convertUntyped and - // checker.representable. Need to clean up. + // x is an untyped value representable by a value of type T. if isUntyped(Vu) { - switch t := Tu.(type) { - case *Basic: - if x.isNil() && t.kind == UnsafePointer { - return true - } - if x.mode == constant_ { - return representableConst(x.val, check, t, nil) - } - // The result of a comparison is an untyped boolean, - // but may not be a constant. 
- if Vb, _ := Vu.(*Basic); Vb != nil { - return Vb.kind == UntypedBool && isBoolean(Tu) - } - case *Sum: + if t, ok := Tu.(*Sum); ok { return t.is(func(t Type) bool { // TODO(gri) this could probably be more efficient - return x.assignableTo(check, t, reason) - }) - case *Interface: - check.completeInterface(nopos, t) - return x.isNil() || t.Empty() - case *Pointer, *Signature, *Slice, *Map, *Chan: - return x.isNil() + ok, _ := x.assignableTo(check, t, reason) + return ok + }), _IncompatibleAssign } + newType, _, _ := check.implicitTypeAndValue(x, Tu) + return newType != nil, _IncompatibleAssign } // Vu is typed // x's type V and T have identical underlying types // and at least one of V or T is not a named type if check.identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) { - return true + return true, 0 } // T is an interface type and x implements T @@ -312,9 +291,9 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) bool { *reason = "missing method " + m.Name() } } - return false + return false, _InvalidIfaceAssign } - return true + return true, 0 } // x is a bidirectional channel value, T is a channel @@ -322,11 +301,11 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) bool { // and at least one of V or T is not a named type if Vc, ok := Vu.(*Chan); ok && Vc.dir == SendRecv { if Tc, ok := Tu.(*Chan); ok && check.identical(Vc.elem, Tc.elem) { - return !isNamed(V) || !isNamed(T) + return !isNamed(V) || !isNamed(T), _InvalidChanAssign } } - return false + return false, _IncompatibleAssign } // kind2tok translates syntax.LitKinds into token.Tokens. diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go index 44fa51a8e52..fa30650bd44 100644 --- a/src/cmd/compile/internal/types2/resolver.go +++ b/src/cmd/compile/internal/types2/resolver.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -92,14 +91,14 @@ func (check *Checker) declarePkgObj(ident *syntax.Name, obj Object, d *declInfo) // spec: "A package-scope or file-scope identifier with name init // may only be declared to be a function with this (func()) signature." if ident.Value == "init" { - check.errorf(ident, "cannot declare init - must be func") + check.error(ident, "cannot declare init - must be func") return } // spec: "The main package must have package name main and declare // a function main that takes no arguments and returns no value." if ident.Value == "main" && check.pkg.name == "main" { - check.errorf(ident, "cannot declare main - must be func") + check.error(ident, "cannot declare main - must be func") return } @@ -180,7 +179,11 @@ func (check *Checker) importPackage(pos syntax.Pos, path, dir string) *Package { // package should be complete or marked fake, but be cautious if imp.complete || imp.fake { check.impMap[key] = imp - check.pkgCnt[imp.name]++ + // Once we've formatted an error message once, keep the pkgPathMap + // up-to-date on subsequent imports. + if check.pkgPathMap != nil { + check.markImports(imp) + } return imp } @@ -217,7 +220,7 @@ func (check *Checker) collectObjects() { // but there is no corresponding package object. 
check.recordDef(file.PkgName, nil) - fileScope := NewScope(check.pkg.scope, startPos(file), endPos(file), check.filename(fileNo)) + fileScope := NewScope(check.pkg.scope, syntax.StartPos(file), syntax.EndPos(file), check.filename(fileNo)) fileScopes = append(fileScopes, fileScope) check.recordScope(file, fileScope) @@ -256,13 +259,13 @@ func (check *Checker) collectObjects() { name = s.LocalPkgName.Value if path == "C" { // match cmd/compile (not prescribed by spec) - check.errorf(s.LocalPkgName, `cannot rename import "C"`) + check.error(s.LocalPkgName, `cannot rename import "C"`) continue } } if name == "init" { - check.errorf(s.LocalPkgName, "cannot import package as init - init must be a func") + check.error(s.LocalPkgName, "cannot import package as init - init must be a func") continue } @@ -305,8 +308,10 @@ func (check *Checker) collectObjects() { // the object may be imported into more than one file scope // concurrently. See issue #32154.) if alt := fileScope.Insert(obj); alt != nil { - check.errorf(s.LocalPkgName, "%s redeclared in this block", obj.Name()) - check.reportAltDecl(alt) + var err error_ + err.errorf(s.LocalPkgName, "%s redeclared in this block", obj.Name()) + err.recordAltDecl(alt) + check.report(&err) } else { check.dotImportMap[dotImportKey{fileScope, obj}] = pkgName } @@ -425,9 +430,9 @@ func (check *Checker) collectObjects() { } else { // method // d.Recv != nil - if !methodTypeParamsOk && len(d.TParamList) != 0 { - //check.invalidASTf(d.TParamList.Pos(), "method must have no type parameters") - check.invalidASTf(d, "method must have no type parameters") + if !acceptMethodTypeParams && len(d.TParamList) != 0 { + //check.error(d.TParamList.Pos(), invalidAST + "method must have no type parameters") + check.error(d, invalidAST+"method must have no type parameters") } ptr, recv, _ := check.unpackRecv(d.Recv.Type, false) // (Methods with invalid receiver cannot be associated to a type, and @@ -447,7 +452,7 @@ func (check *Checker) collectObjects() { obj.setOrder(uint32(len(check.objMap))) default: - check.invalidASTf(s, "unknown syntax.Decl node %T", s) + check.errorf(s, invalidAST+"unknown syntax.Decl node %T", s) } } } @@ -456,14 +461,16 @@ func (check *Checker) collectObjects() { for _, scope := range fileScopes { for _, obj := range scope.elems { if alt := pkg.scope.Lookup(obj.Name()); alt != nil { + var err error_ if pkg, ok := obj.(*PkgName); ok { - check.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported()) - check.reportAltDecl(pkg) + err.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported()) + err.recordAltDecl(pkg) } else { - check.errorf(alt, "%s already declared through dot-import of %s", alt.Name(), obj.Pkg()) - // TODO(gri) dot-imported objects don't have a position; reportAltDecl won't print anything - check.reportAltDecl(obj) + err.errorf(alt, "%s already declared through dot-import of %s", alt.Name(), obj.Pkg()) + // TODO(gri) dot-imported objects don't have a position; recordAltDecl won't print anything + err.recordAltDecl(obj) } + check.report(&err) } } } @@ -526,7 +533,7 @@ L: // unpack receiver type case *syntax.BadExpr: // ignore - error already reported by parser case nil: - check.invalidASTf(ptyp, "parameterized receiver contains nil parameters") + check.error(ptyp, invalidAST+"parameterized receiver contains nil parameters") default: check.errorf(arg, "receiver type parameter %s must be an identifier", arg) } diff --git a/src/cmd/compile/internal/types2/sanitize.go 
b/src/cmd/compile/internal/types2/sanitize.go index bac569416b4..64a2dedc7d8 100644 --- a/src/cmd/compile/internal/types2/sanitize.go +++ b/src/cmd/compile/internal/types2/sanitize.go @@ -1,10 +1,16 @@ -// UNREVIEWED // Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package types2 +// sanitizeInfo walks the types contained in info to ensure that all instances +// are expanded. +// +// This includes some objects that may be shared across concurrent +// type-checking passes (such as those in the universe scope), so we are +// careful here not to write types that are already sanitized. This avoids a +// data race as any shared types should already be sanitized. func sanitizeInfo(info *Info) { var s sanitizer = make(map[Type]Type) @@ -12,27 +18,42 @@ func sanitizeInfo(info *Info) { // If modified, they must be assigned back. for e, tv := range info.Types { - tv.Type = s.typ(tv.Type) - info.Types[e] = tv + if typ := s.typ(tv.Type); typ != tv.Type { + tv.Type = typ + info.Types[e] = tv + } } for e, inf := range info.Inferred { + changed := false for i, targ := range inf.Targs { - inf.Targs[i] = s.typ(targ) + if typ := s.typ(targ); typ != targ { + inf.Targs[i] = typ + changed = true + } + } + if typ := s.typ(inf.Sig); typ != inf.Sig { + inf.Sig = typ.(*Signature) + changed = true + } + if changed { + info.Inferred[e] = inf } - inf.Sig = s.typ(inf.Sig).(*Signature) - info.Inferred[e] = inf } for _, obj := range info.Defs { if obj != nil { - obj.setType(s.typ(obj.Type())) + if typ := s.typ(obj.Type()); typ != obj.Type() { + obj.setType(typ) + } } } for _, obj := range info.Uses { if obj != nil { - obj.setType(s.typ(obj.Type())) + if typ := s.typ(obj.Type()); typ != obj.Type() { + obj.setType(typ) + } } } @@ -46,26 +67,36 @@ func sanitizeInfo(info *Info) { type sanitizer map[Type]Type func (s sanitizer) typ(typ Type) Type { + if typ == nil { + return nil + } + if t, found := s[typ]; found { return t } s[typ] = typ switch t := typ.(type) { - case nil, *Basic, *bottom, *top: + case *Basic, *bottom, *top: // nothing to do case *Array: - t.elem = s.typ(t.elem) + if elem := s.typ(t.elem); elem != t.elem { + t.elem = elem + } case *Slice: - t.elem = s.typ(t.elem) + if elem := s.typ(t.elem); elem != t.elem { + t.elem = elem + } case *Struct: s.varList(t.fields) case *Pointer: - t.base = s.typ(t.base) + if base := s.typ(t.base); base != t.base { + t.base = base + } case *Tuple: s.tuple(t) @@ -80,33 +111,49 @@ func (s sanitizer) typ(typ Type) Type { case *Interface: s.funcList(t.methods) - s.typ(t.types) + if types := s.typ(t.types); types != t.types { + t.types = types + } s.typeList(t.embeddeds) s.funcList(t.allMethods) - s.typ(t.allTypes) + if allTypes := s.typ(t.allTypes); allTypes != t.allTypes { + t.allTypes = allTypes + } case *Map: - t.key = s.typ(t.key) - t.elem = s.typ(t.elem) + if key := s.typ(t.key); key != t.key { + t.key = key + } + if elem := s.typ(t.elem); elem != t.elem { + t.elem = elem + } case *Chan: - t.elem = s.typ(t.elem) + if elem := s.typ(t.elem); elem != t.elem { + t.elem = elem + } case *Named: - t.orig = s.typ(t.orig) - t.underlying = s.typ(t.underlying) + if orig := s.typ(t.fromRHS); orig != t.fromRHS { + t.fromRHS = orig + } + if under := s.typ(t.underlying); under != t.underlying { + t.underlying = under + } s.typeList(t.targs) s.funcList(t.methods) case *TypeParam: - t.bound = s.typ(t.bound) + if bound := s.typ(t.bound); bound != t.bound { + t.bound = bound + } case 
*instance: typ = t.expand() s[t] = typ default: - unimplemented() + panic("unimplemented") } return typ @@ -114,7 +161,9 @@ func (s sanitizer) typ(typ Type) Type { func (s sanitizer) var_(v *Var) { if v != nil { - v.typ = s.typ(v.typ) + if typ := s.typ(v.typ); typ != v.typ { + v.typ = typ + } } } @@ -132,7 +181,9 @@ func (s sanitizer) tuple(t *Tuple) { func (s sanitizer) func_(f *Func) { if f != nil { - f.typ = s.typ(f.typ) + if typ := s.typ(f.typ); typ != f.typ { + f.typ = typ + } } } @@ -144,6 +195,8 @@ func (s sanitizer) funcList(list []*Func) { func (s sanitizer) typeList(list []Type) { for i, t := range list { - list[i] = s.typ(t) + if typ := s.typ(t); typ != t { + list[i] = typ + } } } diff --git a/src/cmd/compile/internal/types2/scope.go b/src/cmd/compile/internal/types2/scope.go index fd0b6241f5c..ade0a79b31d 100644 --- a/src/cmd/compile/internal/types2/scope.go +++ b/src/cmd/compile/internal/types2/scope.go @@ -81,7 +81,7 @@ func (s *Scope) Lookup(name string) Object { // whose scope is the scope of the package that exported them. func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) { for ; s != nil; s = s.parent { - if obj := s.elems[name]; obj != nil && (!pos.IsKnown() || cmpPos(obj.scopePos(), pos) <= 0) { + if obj := s.elems[name]; obj != nil && (!pos.IsKnown() || obj.scopePos().Cmp(pos) <= 0) { return s, obj } } @@ -153,7 +153,7 @@ func (s *Scope) End() syntax.Pos { return s.end } // The result is guaranteed to be valid only if the type-checked // AST has complete position information. func (s *Scope) Contains(pos syntax.Pos) bool { - return cmpPos(s.pos, pos) <= 0 && cmpPos(pos, s.end) < 0 + return s.pos.Cmp(pos) <= 0 && pos.Cmp(s.end) < 0 } // Innermost returns the innermost (child) scope containing diff --git a/src/cmd/compile/internal/types2/selection.go b/src/cmd/compile/internal/types2/selection.go index 67d1aa7e1df..8128aeee2e5 100644 --- a/src/cmd/compile/internal/types2/selection.go +++ b/src/cmd/compile/internal/types2/selection.go @@ -51,22 +51,6 @@ func (s *Selection) Kind() SelectionKind { return s.kind } // Recv returns the type of x in x.f. func (s *Selection) Recv() Type { return s.recv } -// Work-around for bug where a (*instance) shows up in a final type. -// TODO(gri): fix this bug. -func (s *Selection) TArgs() []Type { - r := s.recv - if p := asPointer(r); p != nil { - r = p.Elem() - } - if n := asNamed(r); n != nil { - return n.TArgs() - } - // The base type (after skipping any pointer) must be a Named type. The - // bug is that sometimes it can be an instance type (which is supposed to - // be an internal type only). - return r.(*instance).targs -} - // Obj returns the object denoted by x.f; a *Var for // a field selection, and a *Func in all other cases. func (s *Selection) Obj() Object { return s.obj } diff --git a/src/cmd/compile/internal/types2/self_test.go b/src/cmd/compile/internal/types2/self_test.go index 6d7971e50f7..4722fec9889 100644 --- a/src/cmd/compile/internal/types2/self_test.go +++ b/src/cmd/compile/internal/types2/self_test.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,17 +6,15 @@ package types2_test import ( "cmd/compile/internal/syntax" - "flag" - "fmt" + "path" "path/filepath" + "runtime" "testing" "time" . 
"cmd/compile/internal/types2" ) -var benchmark = flag.Bool("b", false, "run benchmarks") - func TestSelf(t *testing.T) { files, err := pkgFiles(".") if err != nil { @@ -25,7 +22,7 @@ func TestSelf(t *testing.T) { } conf := Config{Importer: defaultImporter()} - _, err = conf.Check("go/types", files, nil) + _, err = conf.Check("cmd/compile/internal/types2", files, nil) if err != nil { // Importing go/constant doesn't work in the // build dashboard environment. Don't report an error @@ -36,46 +33,69 @@ func TestSelf(t *testing.T) { } } -func TestBenchmark(t *testing.T) { - if !*benchmark { - return - } - - // We're not using testing's benchmarking mechanism directly - // because we want custom output. - - for _, p := range []string{"types", "constant", filepath.Join("internal", "gcimporter")} { - path := filepath.Join("..", p) - runbench(t, path, false) - runbench(t, path, true) - fmt.Println() +func BenchmarkCheck(b *testing.B) { + for _, p := range []string{ + filepath.Join("src", "net", "http"), + filepath.Join("src", "go", "parser"), + filepath.Join("src", "go", "constant"), + filepath.Join("src", "go", "internal", "gcimporter"), + } { + b.Run(path.Base(p), func(b *testing.B) { + path := filepath.Join(runtime.GOROOT(), p) + for _, ignoreFuncBodies := range []bool{false, true} { + name := "funcbodies" + if ignoreFuncBodies { + name = "nofuncbodies" + } + b.Run(name, func(b *testing.B) { + b.Run("info", func(b *testing.B) { + runbench(b, path, ignoreFuncBodies, true) + }) + b.Run("noinfo", func(b *testing.B) { + runbench(b, path, ignoreFuncBodies, false) + }) + }) + } + }) } } -func runbench(t *testing.T, path string, ignoreFuncBodies bool) { +func runbench(b *testing.B, path string, ignoreFuncBodies, writeInfo bool) { files, err := pkgFiles(path) if err != nil { - t.Fatal(err) + b.Fatal(err) } - b := testing.Benchmark(func(b *testing.B) { - for i := 0; i < b.N; i++ { - conf := Config{IgnoreFuncBodies: ignoreFuncBodies} - conf.Check(path, files, nil) - } - }) - // determine line count var lines uint for _, f := range files { lines += f.EOF.Line() } - d := time.Duration(b.NsPerOp()) - fmt.Printf( - "%s: %s for %d lines (%d lines/s), ignoreFuncBodies = %v\n", - filepath.Base(path), d, lines, int64(float64(lines)/d.Seconds()), ignoreFuncBodies, - ) + b.ResetTimer() + start := time.Now() + for i := 0; i < b.N; i++ { + conf := Config{ + IgnoreFuncBodies: ignoreFuncBodies, + Importer: defaultImporter(), + } + var info *Info + if writeInfo { + info = &Info{ + Types: make(map[syntax.Expr]TypeAndValue), + Defs: make(map[*syntax.Name]Object), + Uses: make(map[*syntax.Name]Object), + Implicits: make(map[syntax.Node]Object), + Selections: make(map[*syntax.SelectorExpr]*Selection), + Scopes: make(map[syntax.Node]*Scope), + } + } + if _, err := conf.Check(path, files, info); err != nil { + b.Fatal(err) + } + } + b.StopTimer() + b.ReportMetric(float64(lines)*float64(b.N)/time.Since(start).Seconds(), "lines/s") } func pkgFiles(path string) ([]*syntax.File, error) { diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go new file mode 100644 index 00000000000..236feb0404e --- /dev/null +++ b/src/cmd/compile/internal/types2/sizeof_test.go @@ -0,0 +1,65 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "reflect" + "testing" +) + +// Signal size changes of important structures. 
+ +func TestSizeof(t *testing.T) { + const _64bit = ^uint(0)>>32 != 0 + + var tests = []struct { + val interface{} // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms + }{ + // Types + {Basic{}, 16, 32}, + {Array{}, 16, 24}, + {Slice{}, 8, 16}, + {Struct{}, 24, 48}, + {Pointer{}, 8, 16}, + {Tuple{}, 12, 24}, + {Signature{}, 44, 88}, + {Sum{}, 12, 24}, + {Interface{}, 60, 120}, + {Map{}, 16, 32}, + {Chan{}, 12, 24}, + {Named{}, 68, 136}, + {TypeParam{}, 28, 48}, + {instance{}, 52, 96}, + {bottom{}, 0, 0}, + {top{}, 0, 0}, + + // Objects + {PkgName{}, 64, 104}, + {Const{}, 64, 104}, + {TypeName{}, 56, 88}, + {Var{}, 60, 96}, + {Func{}, 60, 96}, + {Label{}, 60, 96}, + {Builtin{}, 60, 96}, + {Nil{}, 56, 88}, + + // Misc + {Scope{}, 56, 96}, + {Package{}, 40, 80}, + } + + for _, test := range tests { + got := reflect.TypeOf(test.val).Size() + want := test._32bit + if _64bit { + want = test._64bit + } + if got != want { + t.Errorf("unsafe.Sizeof(%T) = %d, want %d", test.val, got, want) + } + } +} diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go index 34925687e35..cde35c17b6f 100644 --- a/src/cmd/compile/internal/types2/stdlib_test.go +++ b/src/cmd/compile/internal/types2/stdlib_test.go @@ -1,9 +1,8 @@ -// UNREVIEWED // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file tests types.Check by using it to +// This file tests types2.Check by using it to // typecheck the standard library and tests. package types2_test @@ -14,7 +13,6 @@ import ( "fmt" "go/build" "internal/testenv" - "io/ioutil" "os" "path/filepath" "runtime" @@ -91,7 +89,7 @@ func firstComment(filename string) (first string) { } func testTestDir(t *testing.T, path string, ignore ...string) { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { t.Fatal(err) } @@ -184,17 +182,16 @@ func TestStdFixed(t *testing.T) { "bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore "issue6889.go", // gc-specific test "issue11362.go", // canonical import path check - "issue16369.go", // go/types handles this correctly - not an issue - "issue18459.go", // go/types doesn't check validity of //go:xxx directives - "issue18882.go", // go/types doesn't check validity of //go:xxx directives - "issue20529.go", // go/types does not have constraints on stack size - "issue22200.go", // go/types does not have constraints on stack size - "issue22200b.go", // go/types does not have constraints on stack size - "issue25507.go", // go/types does not have constraints on stack size - "issue20780.go", // go/types does not have constraints on stack size - "issue42058a.go", // go/types does not have constraints on channel element size - "issue42058b.go", // go/types does not have constraints on channel element size - "bug251.go", // issue #34333 which was exposed with fix for #34151 + "issue16369.go", // types2 handles this correctly - not an issue + "issue18459.go", // types2 doesn't check validity of //go:xxx directives + "issue18882.go", // types2 doesn't check validity of //go:xxx directives + "issue20529.go", // types2 does not have constraints on stack size + "issue22200.go", // types2 does not have constraints on stack size + "issue22200b.go", // types2 does not have constraints on stack size + "issue25507.go", // types2 does not have constraints on stack size + "issue20780.go", // 
types2 does not have constraints on stack size + "issue42058a.go", // types2 does not have constraints on channel element size + "issue42058b.go", // types2 does not have constraints on channel element size ) } @@ -207,6 +204,9 @@ func TestStdKen(t *testing.T) { // Package paths of excluded packages. var excluded = map[string]bool{ "builtin": true, + + // See #46027: some imports are missing for this submodule. + "crypto/ed25519/internal/edwards25519/field/_asm": true, } // typecheck typechecks the given package files. @@ -298,7 +298,7 @@ func (w *walker) walk(dir string) { return } - fis, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) if err != nil { w.errh(err) return @@ -318,9 +318,9 @@ func (w *walker) walk(dir string) { } // traverse subdirectories, but don't walk into testdata - for _, fi := range fis { - if fi.IsDir() && fi.Name() != "testdata" { - w.walk(filepath.Join(dir, fi.Name())) + for _, f := range files { + if f.IsDir() && f.Name() != "testdata" { + w.walk(filepath.Join(dir, f.Name())) } } } diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go index 490cd0fc190..c3e646c80c1 100644 --- a/src/cmd/compile/internal/types2/stmt.go +++ b/src/cmd/compile/internal/types2/stmt.go @@ -1,4 +1,3 @@ -// UNREVIEWED // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -14,16 +13,20 @@ import ( ) func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body *syntax.BlockStmt, iota constant.Value) { + if check.conf.IgnoreFuncBodies { + panic("internal error: function body not ignored") + } + if check.conf.Trace { check.trace(body.Pos(), "--- %s: %s", name, sig) defer func() { - check.trace(endPos(body), "--- ") + check.trace(syntax.EndPos(body), "--- ") }() } // set function scope extent sig.scope.pos = body.Pos() - sig.scope.end = endPos(body) + sig.scope.end = syntax.EndPos(body) // save/restore current context and setup function context // (and use 0 indentation at function start) @@ -67,7 +70,7 @@ func (check *Checker) usage(scope *Scope) { } } sort.Slice(unused, func(i, j int) bool { - return cmpPos(unused[i].pos, unused[j].pos) < 0 + return unused[i].pos.Cmp(unused[j].pos) < 0 }) for _, v := range unused { check.softErrorf(v.pos, "%s declared but not used", v.name) @@ -155,7 +158,7 @@ func (check *Checker) multipleSelectDefaults(list []*syntax.CommClause) { } func (check *Checker) openScope(node syntax.Node, comment string) { - check.openScopeUntil(node, endPos(node), comment) + check.openScopeUntil(node, syntax.EndPos(node), comment) } func (check *Checker) openScopeUntil(node syntax.Node, end syntax.Pos, comment string) { @@ -253,8 +256,10 @@ L: // (quadratic algorithm, but these lists tend to be very short) for _, vt := range seen[val] { if check.identical(v.typ, vt.typ) { - check.errorf(&v, "duplicate case %s in expression switch", &v) - check.error(vt.pos, "\tprevious case") // secondary error, \t indented + var err error_ + err.errorf(&v, "duplicate case %s in expression switch", &v) + err.errorf(vt.pos, "previous case") + check.report(&err) continue L } } @@ -263,7 +268,7 @@ L: } } -func (check *Checker) caseTypes(x *operand, xtyp *Interface, types []syntax.Expr, seen map[Type]syntax.Pos, strict bool) (T Type) { +func (check *Checker) caseTypes(x *operand, xtyp *Interface, types []syntax.Expr, seen map[Type]syntax.Expr) (T Type) { L: for _, e := range types { T = check.typOrNil(e) @@ -275,21 +280,23 
@@ L: } // look for duplicate types // (quadratic algorithm, but type switches tend to be reasonably small) - for t, pos := range seen { + for t, other := range seen { if T == nil && t == nil || T != nil && t != nil && check.identical(T, t) { // talk about "case" rather than "type" because of nil case Ts := "nil" if T != nil { Ts = T.String() } - check.errorf(e, "duplicate case %s in type switch", Ts) - check.error(pos, "\tprevious case") // secondary error, \t indented + var err error_ + err.errorf(e, "duplicate case %s in type switch", Ts) + err.errorf(other, "previous case") + check.report(&err) continue L } } - seen[T] = e.Pos() + seen[T] = e if T != nil { - check.typeAssertion(e.Pos(), x, xtyp, T, strict) + check.typeAssertion(e.Pos(), x, xtyp, T) } } return @@ -353,12 +360,12 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { tch := asChan(ch.typ) if tch == nil { - check.invalidOpf(s, "cannot send to non-chan type %s", ch.typ) + check.errorf(s, invalidOp+"cannot send to non-chan type %s", ch.typ) return } if tch.dir == RecvOnly { - check.invalidOpf(s, "cannot send to receive-only type %s", tch) + check.errorf(s, invalidOp+"cannot send to receive-only type %s", tch) return } @@ -369,7 +376,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { if s.Rhs == nil { // x++ or x-- if len(lhs) != 1 { - check.invalidASTf(s, "%s%s requires one operand", s.Op, s.Op) + check.errorf(s, invalidAST+"%s%s requires one operand", s.Op, s.Op) return } var x operand @@ -378,7 +385,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { return } if !isNumeric(x.typ) { - check.invalidOpf(lhs[0], "%s%s%s (non-numeric type %s)", lhs[0], s.Op, s.Op, x.typ) + check.errorf(lhs[0], invalidOp+"%s%s%s (non-numeric type %s)", lhs[0], s.Op, s.Op, x.typ) return } check.assignVar(lhs[0], &x) @@ -405,11 +412,6 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { check.binary(&x, nil, lhs[0], rhs[0], s.Op) check.assignVar(lhs[0], &x) - // case *syntax.GoStmt: - // check.suspendedCall("go", s.Call) - - // case *syntax.DeferStmt: - // check.suspendedCall("defer", s.Call) case *syntax.CallStmt: // TODO(gri) get rid of this conversion to string kind := "go" @@ -430,8 +432,10 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { // with the same name as a result parameter is in scope at the place of the return." 
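Aside: this and several surrounding hunks replace paired check.errorf/check.error calls with an error_ value that accumulates a primary message plus related positions and is reported once. A toy version of the accumulate-then-report shape (multiError and report are stand-ins, not the checker's actual error_ type):

package main

import (
	"fmt"
	"strings"
)

// multiError collects a primary message plus related notes so they can
// be reported as one unit.
type multiError struct {
	parts []string
	soft  bool // analogous to error_.soft for non-fatal errors
}

func (e *multiError) errorf(format string, args ...interface{}) {
	e.parts = append(e.parts, fmt.Sprintf(format, args...))
}

// report prints the accumulated parts; secondary parts are indented,
// matching the old "\t previous case" convention.
func report(e *multiError) {
	fmt.Println(strings.Join(e.parts, "\n\t"))
}

func main() {
	var err multiError
	err.errorf("duplicate case %s in expression switch", "42")
	err.errorf("previous case")
	report(&err)
}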
for _, obj := range res.vars { if alt := check.lookup(obj.name); alt != nil && alt != obj { - check.errorf(s, "result parameter %s not in scope at return", obj.name) - check.errorf(alt, "\tinner declaration of %s", obj) + var err error_ + err.errorf(s, "result parameter %s not in scope at return", obj.name) + err.errorf(alt, "inner declaration of %s", obj) + check.report(&err) // ok to continue } } @@ -478,7 +482,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { // goto's must have labels, should have been caught above fallthrough default: - check.invalidASTf(s, "branch statement: %s", s.Tok) + check.errorf(s, invalidAST+"branch statement: %s", s.Tok) } case *syntax.BlockStmt: @@ -636,7 +640,7 @@ func (check *Checker) switchStmt(inner stmtContext, s *syntax.SwitchStmt) { seen := make(valueMap) // map of seen case values to positions and types for i, clause := range s.Body { if clause == nil { - check.invalidASTf(clause, "incorrect expression switch case") + check.error(clause, invalidAST+"incorrect expression switch case") continue } end := s.Rbrace @@ -680,6 +684,10 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu if x.mode == invalid { return } + // Caution: We're not using asInterface here because we don't want + // to switch on a suitably constrained type parameter (for + // now). + // TODO(gri) Need to revisit this. xtyp, _ := under(x.typ).(*Interface) if xtyp == nil { check.errorf(&x, "%s is not an interface type", &x) @@ -689,11 +697,11 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu check.multipleSwitchDefaults(s.Body) - var lhsVars []*Var // list of implicitly declared lhs variables - seen := make(map[Type]syntax.Pos) // map of seen types to positions + var lhsVars []*Var // list of implicitly declared lhs variables + seen := make(map[Type]syntax.Expr) // map of seen types to positions for i, clause := range s.Body { if clause == nil { - check.invalidASTf(s, "incorrect type switch case") + check.error(s, invalidAST+"incorrect type switch case") continue } end := s.Rbrace @@ -702,7 +710,7 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu } // Check each type in this type switch case. cases := unpackExpr(clause.Cases) - T := check.caseTypes(&x, xtyp, cases, seen, false) + T := check.caseTypes(&x, xtyp, cases, seen) check.openScopeUntil(clause, end, "case") // If lhs exists, declare a corresponding variable in the case-local scope. if lhs != nil { @@ -719,7 +727,7 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu // "at the end of the TypeSwitchCase" in #16794 instead? scopePos := clause.Pos() // for default clause (len(List) == 0) if n := len(cases); n > 0 { - scopePos = endPos(cases[n-1]) + scopePos = syntax.EndPos(cases[n-1]) } check.declare(check.scope, nil, obj, scopePos) check.recordImplicit(clause, obj) @@ -733,6 +741,9 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu } // If lhs exists, we must have at least one lhs variable that was used. + // (We can't use check.usage because that only looks at one scope; and + // we don't want to use the same variable for all scopes and change the + // variable type underfoot.) 
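Aside: the comment above refers to the language rule that the variable implicitly declared by a type switch must be used in at least one case. A user-level illustration of what the checker enforces (ordinary Go, not checker internals):

package p

func f(x interface{}) {
	// The implicitly declared v must be used in at least one case,
	// otherwise the checker reports "v declared but not used".
	switch v := x.(type) {
	case int:
		_ = v // this use is enough for the whole switch
	default:
		// v is intentionally unused here
	}
}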
if lhs != nil { var used bool for _, v := range lhsVars { @@ -759,7 +770,7 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s var sValue syntax.Expr if p, _ := sKey.(*syntax.ListExpr); p != nil { if len(p.ElemList) != 2 { - check.invalidASTf(s, "invalid lhs in range clause") + check.error(s, invalidAST+"invalid lhs in range clause") return } sKey = p.ElemList[0] @@ -833,7 +844,7 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s // declare variables if len(vars) > 0 { - scopePos := endPos(rclause.X) // TODO(gri) should this just be s.Body.Pos (spec clarification)? + scopePos := syntax.EndPos(rclause.X) // TODO(gri) should this just be s.Body.Pos (spec clarification)? for _, obj := range vars { // spec: "The scope of a constant or variable identifier declared inside // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go index d730642831c..c8e428c1832 100644 --- a/src/cmd/compile/internal/types2/subst.go +++ b/src/cmd/compile/internal/types2/subst.go @@ -308,6 +308,9 @@ func (subst *subster) typ(typ Type) Type { embeddeds, ecopied := subst.typeList(t.embeddeds) if mcopied || types != t.types || ecopied { iface := &Interface{methods: methods, types: types, embeddeds: embeddeds} + if subst.check == nil { + panic("internal error: cannot instantiate interfaces yet") + } subst.check.posMap[iface] = subst.check.posMap[t] // satisfy completeInterface requirement subst.check.completeInterface(nopos, iface) return iface @@ -327,12 +330,14 @@ func (subst *subster) typ(typ Type) Type { } case *Named: - subst.check.indent++ - defer func() { - subst.check.indent-- - }() - dump := func(format string, args ...interface{}) { - if subst.check.conf.Trace { + // dump is for debugging + dump := func(string, ...interface{}) {} + if subst.check != nil && subst.check.conf.Trace { + subst.check.indent++ + defer func() { + subst.check.indent-- + }() + dump = func(format string, args ...interface{}) { subst.check.trace(subst.pos, format, args...) 
} } @@ -377,24 +382,27 @@ func (subst *subster) typ(typ Type) Type { // before creating a new named type, check if we have this one already h := instantiatedHash(t, new_targs) dump(">>> new type hash: %s", h) - if named, found := subst.check.typMap[h]; found { - dump(">>> found %s", named) - subst.cache[t] = named - return named + if subst.check != nil { + if named, found := subst.check.typMap[h]; found { + dump(">>> found %s", named) + subst.cache[t] = named + return named + } } // create a new named type and populate caches to avoid endless recursion tname := NewTypeName(subst.pos, t.obj.pkg, t.obj.name, nil) - named := subst.check.NewNamed(tname, t.underlying, t.methods) // method signatures are updated lazily - named.tparams = t.tparams // new type is still parameterized + named := subst.check.newNamed(tname, t, t.underlying, t.tparams, t.methods) // method signatures are updated lazily named.targs = new_targs - subst.check.typMap[h] = named + if subst.check != nil { + subst.check.typMap[h] = named + } subst.cache[t] = named // do the substitution dump(">>> subst %s with %s (new: %s)", t.underlying, subst.smap, new_targs) named.underlying = subst.typOrNil(t.underlying) - named.orig = named.underlying // for cycle detection (Checker.validType) + named.fromRHS = named.underlying // for cycle detection (Checker.validType) return named diff --git a/src/cmd/compile/internal/types2/testdata/blank.src b/src/cmd/compile/internal/types2/testdata/check/blank.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/blank.src rename to src/cmd/compile/internal/types2/testdata/check/blank.src diff --git a/src/cmd/compile/internal/types2/testdata/builtins.go2 b/src/cmd/compile/internal/types2/testdata/check/builtins.go2 similarity index 100% rename from src/cmd/compile/internal/types2/testdata/builtins.go2 rename to src/cmd/compile/internal/types2/testdata/check/builtins.go2 diff --git a/src/cmd/compile/internal/types2/testdata/builtins.src b/src/cmd/compile/internal/types2/testdata/check/builtins.src similarity index 99% rename from src/cmd/compile/internal/types2/testdata/builtins.src rename to src/cmd/compile/internal/types2/testdata/check/builtins.src index f866ef059f5..6d1f47129b9 100644 --- a/src/cmd/compile/internal/types2/testdata/builtins.src +++ b/src/cmd/compile/internal/types2/testdata/check/builtins.src @@ -35,9 +35,9 @@ func append1() { type S []byte type T string var t T - _ = append(s, "foo" /* ERROR cannot convert */ ) + _ = append(s, "foo" /* ERROR cannot use .* in argument to append */ ) _ = append(s, "foo"...) - _ = append(S(s), "foo" /* ERROR cannot convert */ ) + _ = append(S(s), "foo" /* ERROR cannot use .* in argument to append */ ) _ = append(S(s), "foo"...) _ = append(s, t /* ERROR cannot use t */ ) _ = append(s, t...) 
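Aside: the wording changes in this testdata file track the checker's move from "cannot convert" to assignment-style "cannot use ... in argument to append" errors. For reference, the user-visible behavior being exercised (the rejected call is left commented out so the snippet compiles):

package p

func f() {
	var s []byte
	// _ = append(s, "foo") // rejected: cannot use "foo" ... in argument to append
	_ = append(s, "foo"...) // ok: a string may be appended to a []byte with ...
}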
@@ -283,7 +283,7 @@ func delete1() { delete() // ERROR not enough arguments delete(1) // ERROR not enough arguments delete(1, 2, 3) // ERROR too many arguments - delete(m, 0 /* ERROR cannot convert */) + delete(m, 0 /* ERROR cannot use */) delete(m, s) _ = delete /* ERROR used as value */ (m, s) diff --git a/src/cmd/compile/internal/types2/testdata/chans.go2 b/src/cmd/compile/internal/types2/testdata/check/chans.go2 similarity index 100% rename from src/cmd/compile/internal/types2/testdata/chans.go2 rename to src/cmd/compile/internal/types2/testdata/check/chans.go2 diff --git a/src/cmd/compile/internal/types2/testdata/const0.src b/src/cmd/compile/internal/types2/testdata/check/const0.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/const0.src rename to src/cmd/compile/internal/types2/testdata/check/const0.src diff --git a/src/cmd/compile/internal/types2/testdata/const1.src b/src/cmd/compile/internal/types2/testdata/check/const1.src similarity index 96% rename from src/cmd/compile/internal/types2/testdata/const1.src rename to src/cmd/compile/internal/types2/testdata/check/const1.src index 56b6bd4ca55..c9128017cf7 100644 --- a/src/cmd/compile/internal/types2/testdata/const1.src +++ b/src/cmd/compile/internal/types2/testdata/check/const1.src @@ -6,6 +6,8 @@ package const1 +import "math" + const( mi = ^int(0) mu = ^uint(0) @@ -48,7 +50,7 @@ const ( // without overflow. For now we match the compiler. // See also issue #44057. // smallestFloat64 = 1.0 / (1<<(1023 - 1 + 52)) - smallestFloat64 = 4.940656458412465441765687928682213723651e-324 + smallestFloat64 = math.SmallestNonzeroFloat64 ) const ( @@ -63,7 +65,7 @@ const ( // without overflow. For now we match the compiler. // See also issue #44057. // maxFloat64 = 1<<1023 * (1<<53 - 1) / (1.0<<52) - maxFloat64 = 1.797693134862315708145274237317043567981e+308 + maxFloat64 = math.MaxFloat64 ) const ( @@ -281,9 +283,7 @@ const ( _ = assert(float64(smallestFloat32) == smallestFloat32) _ = assert(float64(smallestFloat32/2) == smallestFloat32/2) _ = assert(float64(smallestFloat64) == smallestFloat64) - // TODO(gri) With the change to the declaration of smallestFloat64 - // this now fails to be true. See issue #44058. - // _ = assert(float64(smallestFloat64/2) == 0) + _ = assert(float64(smallestFloat64/2) == 0) ) const ( diff --git a/src/cmd/compile/internal/types2/testdata/constdecl.src b/src/cmd/compile/internal/types2/testdata/check/constdecl.src similarity index 94% rename from src/cmd/compile/internal/types2/testdata/constdecl.src rename to src/cmd/compile/internal/types2/testdata/check/constdecl.src index 1a7ed003a4c..cb155ab35db 100644 --- a/src/cmd/compile/internal/types2/testdata/constdecl.src +++ b/src/cmd/compile/internal/types2/testdata/check/constdecl.src @@ -111,13 +111,13 @@ func _() { const ( _ byte = 255 + iota /* some gap */ - _ // ERROR overflows byte + _ // ERROR overflows /* some gap */ - /* some gap */ _ /* ERROR overflows byte */; _ /* ERROR overflows byte */ + /* some gap */ _ /* ERROR overflows */; _ /* ERROR overflows */ /* some gap */ _ = 255 + iota - _ = byte /* ERROR overflows byte */ (255) + iota - _ /* ERROR overflows byte */ + _ = byte /* ERROR overflows */ (255) + iota + _ /* ERROR overflows */ ) // Test cases from issue. 
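Aside: const1.src now takes its extreme values from package math instead of hand-typed literals, and re-enables the assertion that halving the smallest nonzero float64 underflows exactly to zero under constant arithmetic. A small runnable illustration of that exactness:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Untyped constant arithmetic is exact: halving the smallest nonzero
	// float64 yields 2^-1075, which rounds to zero on conversion.
	const half = math.SmallestNonzeroFloat64 / 2
	fmt.Println(float64(half) == 0) // true
	fmt.Println(float64(math.SmallestNonzeroFloat64) == math.SmallestNonzeroFloat64) // true
}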
diff --git a/src/cmd/compile/internal/types2/testdata/conversions.src b/src/cmd/compile/internal/types2/testdata/check/conversions.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/conversions.src rename to src/cmd/compile/internal/types2/testdata/check/conversions.src diff --git a/src/cmd/compile/internal/types2/testdata/conversions2.src b/src/cmd/compile/internal/types2/testdata/check/conversions2.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/conversions2.src rename to src/cmd/compile/internal/types2/testdata/check/conversions2.src diff --git a/src/cmd/compile/internal/types2/testdata/cycles.src b/src/cmd/compile/internal/types2/testdata/check/cycles.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/cycles.src rename to src/cmd/compile/internal/types2/testdata/check/cycles.src diff --git a/src/cmd/compile/internal/types2/testdata/cycles1.src b/src/cmd/compile/internal/types2/testdata/check/cycles1.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/cycles1.src rename to src/cmd/compile/internal/types2/testdata/check/cycles1.src diff --git a/src/cmd/compile/internal/types2/testdata/cycles2.src b/src/cmd/compile/internal/types2/testdata/check/cycles2.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/cycles2.src rename to src/cmd/compile/internal/types2/testdata/check/cycles2.src diff --git a/src/cmd/compile/internal/types2/testdata/cycles3.src b/src/cmd/compile/internal/types2/testdata/check/cycles3.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/cycles3.src rename to src/cmd/compile/internal/types2/testdata/check/cycles3.src diff --git a/src/cmd/compile/internal/types2/testdata/cycles4.src b/src/cmd/compile/internal/types2/testdata/check/cycles4.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/cycles4.src rename to src/cmd/compile/internal/types2/testdata/check/cycles4.src diff --git a/src/cmd/compile/internal/types2/testdata/cycles5.src b/src/cmd/compile/internal/types2/testdata/check/cycles5.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/cycles5.src rename to src/cmd/compile/internal/types2/testdata/check/cycles5.src diff --git a/src/cmd/compile/internal/types2/testdata/decls0.src b/src/cmd/compile/internal/types2/testdata/check/decls0.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/decls0.src rename to src/cmd/compile/internal/types2/testdata/check/decls0.src diff --git a/src/cmd/compile/internal/types2/testdata/decls1.src b/src/cmd/compile/internal/types2/testdata/check/decls1.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/decls1.src rename to src/cmd/compile/internal/types2/testdata/check/decls1.src diff --git a/src/cmd/compile/internal/types2/testdata/decls2/decls2a.src b/src/cmd/compile/internal/types2/testdata/check/decls2/decls2a.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/decls2/decls2a.src rename to src/cmd/compile/internal/types2/testdata/check/decls2/decls2a.src diff --git a/src/cmd/compile/internal/types2/testdata/decls2/decls2b.src b/src/cmd/compile/internal/types2/testdata/check/decls2/decls2b.src similarity index 91% rename from src/cmd/compile/internal/types2/testdata/decls2/decls2b.src rename to src/cmd/compile/internal/types2/testdata/check/decls2/decls2b.src index 8e82c6dcde1..7b3229cadc3 100644 --- 
a/src/cmd/compile/internal/types2/testdata/decls2/decls2b.src +++ b/src/cmd/compile/internal/types2/testdata/check/decls2/decls2b.src @@ -40,17 +40,17 @@ func f_double /* ERROR "redeclared" */ () {} // Verify by checking that errors are reported. func (T /* ERROR "undeclared" */ ) _() {} func (T1) _(undeclared /* ERROR "undeclared" */ ) {} -func (T1) _() int { return "foo" /* ERROR "cannot convert" */ } +func (T1) _() int { return "foo" /* ERROR "cannot use .* in return statement" */ } // Methods with undeclared receiver type can still be checked. // Verify by checking that errors are reported. func (Foo /* ERROR "undeclared" */ ) m() {} func (Foo /* ERROR "undeclared" */ ) m(undeclared /* ERROR "undeclared" */ ) {} -func (Foo /* ERROR "undeclared" */ ) m() int { return "foo" /* ERROR "cannot convert" */ } +func (Foo /* ERROR "undeclared" */ ) m() int { return "foo" /* ERROR "cannot use .* in return statement" */ } func (Foo /* ERROR "undeclared" */ ) _() {} func (Foo /* ERROR "undeclared" */ ) _(undeclared /* ERROR "undeclared" */ ) {} -func (Foo /* ERROR "undeclared" */ ) _() int { return "foo" /* ERROR "cannot convert" */ } +func (Foo /* ERROR "undeclared" */ ) _() int { return "foo" /* ERROR "cannot use .* in return statement" */ } // Receiver declarations are regular parameter lists; // receiver types may use parentheses, and the list diff --git a/src/cmd/compile/internal/types2/testdata/decls3.src b/src/cmd/compile/internal/types2/testdata/check/decls3.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/decls3.src rename to src/cmd/compile/internal/types2/testdata/check/decls3.src diff --git a/src/cmd/compile/internal/types2/testdata/decls4.src b/src/cmd/compile/internal/types2/testdata/check/decls4.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/decls4.src rename to src/cmd/compile/internal/types2/testdata/check/decls4.src diff --git a/src/cmd/compile/internal/types2/testdata/decls5.src b/src/cmd/compile/internal/types2/testdata/check/decls5.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/decls5.src rename to src/cmd/compile/internal/types2/testdata/check/decls5.src diff --git a/src/cmd/compile/internal/types2/testdata/errors.src b/src/cmd/compile/internal/types2/testdata/check/errors.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/errors.src rename to src/cmd/compile/internal/types2/testdata/check/errors.src diff --git a/src/cmd/compile/internal/types2/testdata/expr0.src b/src/cmd/compile/internal/types2/testdata/check/expr0.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/expr0.src rename to src/cmd/compile/internal/types2/testdata/check/expr0.src diff --git a/src/cmd/compile/internal/types2/testdata/expr1.src b/src/cmd/compile/internal/types2/testdata/check/expr1.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/expr1.src rename to src/cmd/compile/internal/types2/testdata/check/expr1.src diff --git a/src/cmd/compile/internal/types2/testdata/expr2.src b/src/cmd/compile/internal/types2/testdata/check/expr2.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/expr2.src rename to src/cmd/compile/internal/types2/testdata/check/expr2.src diff --git a/src/cmd/compile/internal/types2/testdata/expr3.src b/src/cmd/compile/internal/types2/testdata/check/expr3.src similarity index 95% rename from src/cmd/compile/internal/types2/testdata/expr3.src rename to 
src/cmd/compile/internal/types2/testdata/check/expr3.src index 6d0ac6cd94d..eab3f72c4d5 100644 --- a/src/cmd/compile/internal/types2/testdata/expr3.src +++ b/src/cmd/compile/internal/types2/testdata/check/expr3.src @@ -35,6 +35,7 @@ func indexes() { _ = a[9] _ = a[10 /* ERROR "index .* out of bounds" */ ] _ = a[1 /* ERROR "overflows" */ <<100] + _ = a[1<< /* ERROR "constant shift overflow" */ 1000] // no out-of-bounds follow-on error _ = a[10:] _ = a[:10] _ = a[10:10] @@ -95,7 +96,7 @@ func indexes() { _ = &s /* ERROR "cannot take address" */ [:10] var m map[string]int - _ = m[0 /* ERROR "cannot convert" */ ] + _ = m[0 /* ERROR "cannot use .* in map index" */ ] _ = m /* ERROR "cannot slice" */ ["foo" : "bar"] _ = m["foo"] // ok is of type bool @@ -103,7 +104,7 @@ func indexes() { var ok mybool _, ok = m["bar"] _ = ok - _ = m[0 /* ERROR "cannot convert 0" */ ] + "foo" // ERROR "cannot convert" + _ = m[0 /* ERROR "cannot use 0" */ ] + "foo" // ERROR "cannot convert" var t string _ = t[- /* ERROR "negative" */ 1] @@ -186,7 +187,7 @@ func struct_literals() { _ = T1{aa /* ERROR "unknown field" */ : 0} _ = T1{1 /* ERROR "invalid field name" */ : 0} _ = T1{a: 0, s: "foo", u: 0, a /* ERROR "duplicate field" */: 10} - _ = T1{a: "foo" /* ERROR "cannot convert" */ } + _ = T1{a: "foo" /* ERROR "cannot use .* in struct literal" */ } _ = T1{c /* ERROR "unknown field" */ : 0} _ = T1{T0: { /* ERROR "missing type" */ }} // struct literal element type may not be elided _ = T1{T0: T0{}} @@ -197,7 +198,7 @@ func struct_literals() { _ = T0{1, b /* ERROR "mixture" */ : 2, 3} _ = T0{1, 2} /* ERROR "too few values" */ _ = T0{1, 2, 3, 4 /* ERROR "too many values" */ } - _ = T0{1, "foo" /* ERROR "cannot convert" */, 3.4 /* ERROR "truncated" */} + _ = T0{1, "foo" /* ERROR "cannot use .* in struct literal" */, 3.4 /* ERROR "cannot use .*\(truncated\)" */} // invalid type type P *struct{ @@ -237,7 +238,7 @@ func array_literals() { _ = A1{5: 5, 6, 7, 4: 4, 1 /* ERROR "overflows" */ <<100: 4} _ = A1{2.0} _ = A1{2.1 /* ERROR "truncated" */ } - _ = A1{"foo" /* ERROR "cannot convert" */ } + _ = A1{"foo" /* ERROR "cannot use .* in array or slice literal" */ } // indices must be integer constants i := 1 @@ -303,7 +304,7 @@ func slice_literals() { _ = S0{5: 5, 6, 7, 4: 4, 1 /* ERROR "overflows" */ <<100: 4} _ = S0{2.0} _ = S0{2.1 /* ERROR "truncated" */ } - _ = S0{"foo" /* ERROR "cannot convert" */ } + _ = S0{"foo" /* ERROR "cannot use .* in array or slice literal" */ } // indices must be resolved correctly const index1 = 1 @@ -356,8 +357,8 @@ func map_literals() { _ = M0{} _ = M0{1 /* ERROR "missing key" */ } - _ = M0{1 /* ERROR "cannot convert" */ : 2} - _ = M0{"foo": "bar" /* ERROR "cannot convert" */ } + _ = M0{1 /* ERROR "cannot use .* in map literal" */ : 2} + _ = M0{"foo": "bar" /* ERROR "cannot use .* in map literal" */ } _ = M0{"foo": 1, "bar": 2, "foo" /* ERROR "duplicate key" */ : 3 } _ = map[interface{}]int{2: 1, 2 /* ERROR "duplicate key" */ : 1} diff --git a/src/cmd/compile/internal/types2/testdata/go1_12.src b/src/cmd/compile/internal/types2/testdata/check/go1_12.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/go1_12.src rename to src/cmd/compile/internal/types2/testdata/check/go1_12.src diff --git a/src/cmd/compile/internal/types2/testdata/go1_13.src b/src/cmd/compile/internal/types2/testdata/check/go1_13.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/go1_13.src rename to src/cmd/compile/internal/types2/testdata/check/go1_13.src diff --git 
a/src/cmd/compile/internal/types2/testdata/check/go1_16.src b/src/cmd/compile/internal/types2/testdata/check/go1_16.src new file mode 100644 index 00000000000..fdf5c99d7e3 --- /dev/null +++ b/src/cmd/compile/internal/types2/testdata/check/go1_16.src @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check Go language version-specific errors. + +package go1_16 // go1.16 + +type Slice []byte +type Array [8]byte + +var s Slice +var p = (*Array)(s /* ERROR requires go1.17 or later */ ) diff --git a/src/cmd/compile/internal/types2/testdata/go1_8.src b/src/cmd/compile/internal/types2/testdata/check/go1_8.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/go1_8.src rename to src/cmd/compile/internal/types2/testdata/check/go1_8.src diff --git a/src/cmd/compile/internal/types2/testdata/gotos.src b/src/cmd/compile/internal/types2/testdata/check/gotos.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/gotos.src rename to src/cmd/compile/internal/types2/testdata/check/gotos.src diff --git a/src/cmd/compile/internal/types2/testdata/importC.src b/src/cmd/compile/internal/types2/testdata/check/importC.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/importC.src rename to src/cmd/compile/internal/types2/testdata/check/importC.src diff --git a/src/cmd/compile/internal/types2/testdata/importdecl0/importdecl0a.src b/src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0a.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/importdecl0/importdecl0a.src rename to src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0a.src diff --git a/src/cmd/compile/internal/types2/testdata/importdecl0/importdecl0b.src b/src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0b.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/importdecl0/importdecl0b.src rename to src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0b.src diff --git a/src/cmd/compile/internal/types2/testdata/importdecl1/importdecl1a.src b/src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1a.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/importdecl1/importdecl1a.src rename to src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1a.src diff --git a/src/cmd/compile/internal/types2/testdata/importdecl1/importdecl1b.src b/src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1b.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/importdecl1/importdecl1b.src rename to src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1b.src diff --git a/src/cmd/compile/internal/types2/testdata/init0.src b/src/cmd/compile/internal/types2/testdata/check/init0.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/init0.src rename to src/cmd/compile/internal/types2/testdata/check/init0.src diff --git a/src/cmd/compile/internal/types2/testdata/init1.src b/src/cmd/compile/internal/types2/testdata/check/init1.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/init1.src rename to src/cmd/compile/internal/types2/testdata/check/init1.src diff --git a/src/cmd/compile/internal/types2/testdata/init2.src b/src/cmd/compile/internal/types2/testdata/check/init2.src similarity index 100% 
rename from src/cmd/compile/internal/types2/testdata/init2.src rename to src/cmd/compile/internal/types2/testdata/check/init2.src diff --git a/src/cmd/compile/internal/types2/testdata/issue25008/issue25008a.src b/src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008a.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/issue25008/issue25008a.src rename to src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008a.src diff --git a/src/cmd/compile/internal/types2/testdata/issue25008/issue25008b.src b/src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008b.src similarity index 100% rename from src/cmd/compile/internal/types2/testdata/issue25008/issue25008b.src rename to src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008b.src diff --git a/src/cmd/compile/internal/types2/testdata/issues.go2 b/src/cmd/compile/internal/types2/testdata/check/issues.go2 similarity index 100% rename from src/cmd/compile/internal/types2/testdata/issues.go2 rename to src/cmd/compile/internal/types2/testdata/check/issues.go2 diff --git a/src/cmd/compile/internal/types2/testdata/issues.src b/src/cmd/compile/internal/types2/testdata/check/issues.src similarity index 97% rename from src/cmd/compile/internal/types2/testdata/issues.src rename to src/cmd/compile/internal/types2/testdata/check/issues.src index 1bfc7fec75f..21aa208cc76 100644 --- a/src/cmd/compile/internal/types2/testdata/issues.src +++ b/src/cmd/compile/internal/types2/testdata/check/issues.src @@ -354,12 +354,18 @@ func issue26234c() { func issue35895() { // T is defined in this package, don't qualify its name with the package name. - var _ T = 0 // ERROR cannot convert 0 \(untyped int constant\) to T + var _ T = 0 // ERROR cannot use 0 \(untyped int constant\) as T // There is only one package with name syntax imported, only use the (global) package name in error messages. - var _ *syn.File = 0 // ERROR cannot convert 0 \(untyped int constant\) to \*syntax.File + var _ *syn.File = 0 // ERROR cannot use 0 \(untyped int constant\) as \*syntax.File // Because both t1 and t2 have the same global package name (template), // qualify packages with full path name in this case. var _ t1.Template = t2 /* ERROR cannot use .* \(value of type "html/template".Template\) as "text/template".Template */ .Template{} } + +func issue42989(s uint) { + var m map[int]string + delete(m, 1<") case *Basic: - if t.kind == UnsafePointer { - buf.WriteString("unsafe.") + // exported basic types go into package unsafe + // (currently this is just unsafe.Pointer) + if isExported(t.name) { + if obj, _ := Unsafe.scope.Lookup(t.name).(*TypeName); obj != nil { + writeTypeName(buf, obj, qf) + break + } } + if gcCompatibilityMode { // forget the alias names switch t.kind { diff --git a/src/cmd/compile/internal/types2/typestring_test.go b/src/cmd/compile/internal/types2/typestring_test.go index 97a4fdf73de..d98e9a5ade6 100644 --- a/src/cmd/compile/internal/types2/typestring_test.go +++ b/src/cmd/compile/internal/types2/typestring_test.go @@ -111,7 +111,7 @@ var dependentTestTypes = []testEntry{ // interfaces dup(`interface{io.Reader; io.Writer}`), dup(`interface{m() int; io.Writer}`), - {`interface{m() interface{T}}`, `interface{m() interface{p.T}}`}, + {`interface{m() interface{T}}`, `interface{m() interface{generic_p.T}}`}, } func TestTypeString(t *testing.T) { @@ -122,7 +122,7 @@ func TestTypeString(t *testing.T) { tests = append(tests, dependentTestTypes...) 
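The expr3.src and issues.src updates above track a rewording in types2: assignability failures in indexes and literals are now reported as "cannot use ... as ..." with context ("in map index", "in struct literal", "in array or slice literal") instead of the misleading "cannot convert". The new issue42989 case additionally checks that an untyped constant shifted by a variable is accepted where a map key is expected. A sketch of that pattern (my own example, not the testdata verbatim):

```go
package main

func main() {
	var m map[int]string
	var s uint = 3
	// issue 42989: 1<<s is a non-constant shift of an untyped constant;
	// its type must be taken from the map's key type, so this type-checks.
	delete(m, 1<<s)
	_ = m[1<<s]
	// With the rewording, a mismatched key now reads, e.g.:
	//   cannot use "x" (untyped string constant) as int value in map index
	// _ = m["x"]
}
```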
for _, test := range tests { - src := `package p; import "io"; type _ io.Writer; type T ` + test.src + src := `package generic_p; import "io"; type _ io.Writer; type T ` + test.src pkg, err := makePkg(src) if err != nil { t.Errorf("%s: %s", src, err) diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go index 7190cb446ac..e64d804c30b 100644 --- a/src/cmd/compile/internal/types2/typexpr.go +++ b/src/cmd/compile/internal/types2/typexpr.go @@ -15,6 +15,9 @@ import ( "strings" ) +// Disabled by default, but enabled when running tests (via types_test.go). +var acceptMethodTypeParams bool + // ident type-checks identifier e and initializes x with the value or type of e. // If an error occurred, x.mode is set to invalid. // For the meaning of def, see Checker.definedType, below. @@ -29,7 +32,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo scope, obj := check.scope.LookupParent(e.Value, check.pos) if obj == nil { if e.Value == "_" { - check.errorf(e, "cannot use _ as value or type") + check.error(e, "cannot use _ as value or type") } else { if check.conf.CompilerErrorMessages { check.errorf(e, "undefined: %s", e.Value) @@ -76,7 +79,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo } if obj == universeIota { if check.iota == nil { - check.errorf(e, "cannot use iota outside constant declaration") + check.error(e, "cannot use iota outside constant declaration") return } x.val = check.iota @@ -131,7 +134,7 @@ func (check *Checker) typ(e syntax.Expr) Type { // (see ordinaryType). func (check *Checker) varType(e syntax.Expr) Type { typ := check.definedType(e, nil) - check.ordinaryType(startPos(e), typ) + check.ordinaryType(syntax.StartPos(e), typ) return typ } @@ -141,7 +144,7 @@ func (check *Checker) ordinaryType(pos syntax.Pos, typ Type) { // We don't want to call under() (via Interface) or complete interfaces while we // are in the middle of type-checking parameter declarations that might belong to // interface methods. Delay this check to the end of type-checking. - check.atEnd(func() { + check.later(func() { if t := asInterface(typ); t != nil { check.completeInterface(pos, t) // TODO(gri) is this the correct position? if t.allTypes != nil { @@ -336,8 +339,8 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams [] // Always type-check method type parameters but complain if they are not enabled. // (A separate check is needed when type-checking interface method signatures because // they don't have a receiver specification.) - if recvPar != nil && !check.conf.AcceptMethodTypeParams { - check.errorf(ftyp, "methods cannot have type parameters") + if recvPar != nil && !acceptMethodTypeParams { + check.error(ftyp, "methods cannot have type parameters") } } @@ -352,8 +355,10 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams [] params, variadic := check.collectParams(scope, ftyp.ParamList, nil, true) results, _ := check.collectParams(scope, ftyp.ResultList, nil, false) scope.Squash(func(obj, alt Object) { - check.errorf(obj, "%s redeclared in this block", obj.Name()) - check.reportAltDecl(alt) + var err error_ + err.errorf(obj, "%s redeclared in this block", obj.Name()) + err.recordAltDecl(alt) + check.report(&err) }) if recvPar != nil { @@ -426,9 +431,9 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams [] } // goTypeName returns the Go type name for typ and -// removes any occurences of "types." 
from that name. +// removes any occurrences of "types2." from that name. func goTypeName(typ Type) string { - return strings.Replace(fmt.Sprintf("%T", typ), "types.", "", -1) // strings.ReplaceAll is not available in Go 1.4 + return strings.Replace(fmt.Sprintf("%T", typ), "types2.", "", -1) // strings.ReplaceAll is not available in Go 1.4 } // typInternal drives type checking of types. @@ -509,11 +514,14 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) { typ.len = check.arrayLength(e.Len) } else { // [...]array - check.errorf(e, "invalid use of [...] array (outside a composite literal)") + check.error(e, "invalid use of [...] array (outside a composite literal)") typ.len = -1 } typ.elem = check.varType(e.Elem) - return typ + if typ.len >= 0 { + return typ + } + // report error if we encountered [...] case *syntax.SliceType: typ := new(Slice) @@ -572,7 +580,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) { // // Delay this check because it requires fully setup types; // it is safe to continue in any case (was issue 6667). - check.atEnd(func() { + check.later(func() { if !Comparable(typ.key) { var why string if asTypeParam(typ.key) != nil { @@ -597,7 +605,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) { case syntax.RecvOnly: dir = RecvOnly default: - check.invalidASTf(e, "unknown channel direction %d", e.Dir) + check.errorf(e, invalidAST+"unknown channel direction %d", e.Dir) // ok to continue } @@ -669,12 +677,12 @@ func (check *Checker) instantiatedType(x syntax.Expr, targs []syntax.Expr, def * // determine argument positions (for error reporting) typ.poslist = make([]syntax.Pos, len(targs)) for i, arg := range targs { - typ.poslist[i] = arg.Pos() + typ.poslist[i] = syntax.StartPos(arg) } // make sure we check instantiation works at least once // and that the resulting type is valid - check.atEnd(func() { + check.later(func() { t := typ.expand() check.validType(t, nil) }) @@ -710,7 +718,7 @@ func (check *Checker) arrayLength(e syntax.Expr) int64 { } // typeList provides the list of types corresponding to the incoming expression list. -// If an error occured, the result is nil, but all list elements were type-checked. +// If an error occurred, the result is nil, but all list elements were type-checked. 
func (check *Checker) typeList(list []syntax.Expr) []Type { res := make([]Type, len(list)) // res != nil even if len(list) == 0 for i, x := range list { @@ -761,7 +769,7 @@ func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 sy // named parameter name := field.Name.Value if name == "" { - check.invalidASTf(field.Name, "anonymous parameter") + check.error(field.Name, invalidAST+"anonymous parameter") // ok to continue } par := NewParam(field.Name.Pos(), check.pkg, name, typ) @@ -778,7 +786,7 @@ func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 sy } if named && anonymous { - check.invalidASTf(list[0], "list contains both named and anonymous parameters") + check.error(list[0], invalidAST+"list contains both named and anonymous parameters") // ok to continue } @@ -796,8 +804,10 @@ func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 sy func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool { if alt := oset.insert(obj); alt != nil { - check.errorf(pos, "%s redeclared", obj.Name()) - check.reportAltDecl(alt) + var err error_ + err.errorf(pos, "%s redeclared", obj.Name()) + err.recordAltDecl(alt) + check.report(&err) return false } return true @@ -813,9 +823,9 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType name := f.Name.Value if name == "_" { if check.conf.CompilerErrorMessages { - check.errorf(f.Name, "methods must have a unique non-blank name") + check.error(f.Name, "methods must have a unique non-blank name") } else { - check.errorf(f.Name, "invalid method name _") + check.error(f.Name, "invalid method name _") } continue // ignore } @@ -826,7 +836,7 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType // the author intended to include all types. types = append(types, f.Type) if tname != nil && tname != f.Name { - check.errorf(f.Name, "cannot have multiple type lists in an interface") + check.error(f.Name, "cannot have multiple type lists in an interface") } tname = f.Name continue @@ -836,7 +846,7 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType sig, _ := typ.(*Signature) if sig == nil { if typ != Typ[Invalid] { - check.invalidASTf(f.Type, "%s is not a method signature", typ) + check.errorf(f.Type, invalidAST+"%s is not a method signature", typ) } continue // ignore } @@ -844,8 +854,8 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType // Always type-check method type parameters but complain if they are not enabled. // (This extra check is needed here because interface method signatures don't have // a receiver specification.) 
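declareInSet and the Squash callback above switch from two chained errorf calls to a single error_ value that records the alternative declaration, so the primary "redeclared" message and its secondary note travel as one multi-part error. The kind of code that exercises this path, as a sketch:

```go
package p

var x int

// Declaring x again in the same scope yields a single two-part error:
// "x redeclared in this block" here, plus a note pointing at the
// declaration above ("other declaration of x").
// var x string
```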
- if sig.tparams != nil && !check.conf.AcceptMethodTypeParams { - check.errorf(f.Type, "methods cannot have type parameters") + if sig.tparams != nil && !acceptMethodTypeParams { + check.error(f.Type, "methods cannot have type parameters") } // use named receiver type if available (for better error messages) @@ -940,18 +950,22 @@ func (check *Checker) completeInterface(pos syntax.Pos, ityp *Interface) { methods = append(methods, m) mpos[m] = pos case explicit: - check.errorf(pos, "duplicate method %s", m.name) - check.errorf(mpos[other.(*Func)], "\tother declaration of %s", m.name) // secondary error, \t indented + var err error_ + err.errorf(pos, "duplicate method %s", m.name) + err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name) + check.report(&err) default: // We have a duplicate method name in an embedded (not explicitly declared) method. // Check method signatures after all types are computed (issue #33656). // If we're pre-go1.14 (overlapping embeddings are not permitted), report that // error here as well (even though we could do it eagerly) because it's the same // error message. - check.atEnd(func() { + check.later(func() { if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) { - check.errorf(pos, "duplicate method %s", m.name) - check.errorf(mpos[other.(*Func)], "\tother declaration of %s", m.name) // secondary error, \t indented + var err error_ + err.errorf(pos, "duplicate method %s", m.name) + err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name) + check.report(&err) } }) } @@ -1079,7 +1093,7 @@ func (check *Checker) tag(t *syntax.BasicLit) string { return val } } - check.invalidASTf(t, "incorrect tag syntax: %q", t.Value) + check.errorf(t, invalidAST+"incorrect tag syntax: %q", t.Value) } return "" } @@ -1146,7 +1160,7 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { // spec: "An embedded type must be specified as a type name T or as a // pointer to a non-interface type name *T, and T itself may not be a // pointer type." - pos := startPos(f.Type) + pos := syntax.StartPos(f.Type) name := embeddedFieldIdent(f.Type) if name == nil { check.errorf(pos, "invalid embedded field type %s", f.Type) @@ -1162,7 +1176,7 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { // (via under(t)) a possibly incomplete type. 
embeddedTyp := typ // for closure below embeddedPos := pos - check.atEnd(func() { + check.later(func() { t, isPtr := deref(embeddedTyp) switch t := optype(t).(type) { case *Basic: @@ -1172,13 +1186,13 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { } // unsafe.Pointer is treated like a regular pointer if t.kind == UnsafePointer { - check.errorf(embeddedPos, "embedded field type cannot be unsafe.Pointer") + check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer") } case *Pointer: - check.errorf(embeddedPos, "embedded field type cannot be a pointer") + check.error(embeddedPos, "embedded field type cannot be a pointer") case *Interface: if isPtr { - check.errorf(embeddedPos, "embedded field type cannot be a pointer to an interface") + check.error(embeddedPos, "embedded field type cannot be a pointer to an interface") } } }) @@ -1212,7 +1226,7 @@ func (check *Checker) collectTypeConstraints(pos syntax.Pos, types []syntax.Expr list := make([]Type, 0, len(types)) // assume all types are correct for _, texpr := range types { if texpr == nil { - check.invalidASTf(pos, "missing type constraint") + check.error(pos, invalidAST+"missing type constraint") continue } list = append(list, check.varType(texpr)) @@ -1222,7 +1236,7 @@ func (check *Checker) collectTypeConstraints(pos syntax.Pos, types []syntax.Expr // interfaces, which may not be complete yet. It's ok to do this check at the // end because it's not a requirement for correctness of the code. // Note: This is a quadratic algorithm, but type lists tend to be short. - check.atEnd(func() { + check.later(func() { for i, t := range list { if t := asInterface(t); t != nil { check.completeInterface(types[i].Pos(), t) diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go index d2ea2b952b0..e1832bbb2a3 100644 --- a/src/cmd/compile/internal/types2/unify.go +++ b/src/cmd/compile/internal/types2/unify.go @@ -6,6 +6,8 @@ package types2 +import "bytes" + // The unifier maintains two separate sets of type parameters x and y // which are used to resolve type parameters in the x and y arguments // provided to the unify call. For unidirectional unification, only @@ -69,6 +71,22 @@ type tparamsList struct { indices []int // len(d.indices) == len(d.tparams) } +// String returns a string representation for a tparamsList. For debugging. +func (d *tparamsList) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + for i, tname := range d.tparams { + if i > 0 { + buf.WriteString(", ") + } + writeType(&buf, tname.typ, nil, nil) + buf.WriteString(": ") + writeType(&buf, d.at(i), nil, nil) + } + buf.WriteByte(']') + return buf.String() +} + // init initializes d with the given type parameters. // The type parameters must be in the order in which they appear in their declaration // (this ensures that the tparams indices match the respective type parameter index). 
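The structType changes above move the embedded-field checks from check.atEnd to the consolidated check.later queue; the rules being enforced are unchanged. A small reminder of what those deferred checks reject, as a sketch:

```go
package p

type T struct{}
type U struct{}
type PT *T

type S struct {
	T  // ok: embedded fields are type names ...
	*U // ... or pointers to non-interface type names
	// PT             // error: embedded field type cannot be a pointer
	// unsafe.Pointer // error: embedded field type cannot be unsafe.Pointer
}
```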
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go index 3654ab49459..76d4e55e84b 100644 --- a/src/cmd/compile/internal/types2/universe.go +++ b/src/cmd/compile/internal/types2/universe.go @@ -136,9 +136,11 @@ const ( _Recover // package unsafe + _Add _Alignof _Offsetof _Sizeof + _Slice // testing support _Assert @@ -167,9 +169,11 @@ var predeclaredFuncs = [...]struct { _Real: {"real", 1, false, expression}, _Recover: {"recover", 0, false, statement}, + _Add: {"Add", 2, false, expression}, _Alignof: {"Alignof", 1, false, expression}, _Offsetof: {"Offsetof", 1, false, expression}, _Sizeof: {"Sizeof", 1, false, expression}, + _Slice: {"Slice", 2, false, expression}, _Assert: {"assert", 1, false, statement}, _Trace: {"trace", 0, true, statement}, diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go index cb497f048e1..d9d18b6f7a7 100644 --- a/src/cmd/compile/internal/types2/version.go +++ b/src/cmd/compile/internal/types2/version.go @@ -21,7 +21,7 @@ func (check *Checker) langCompat(lit *syntax.BasicLit) { } // len(s) > 2 if strings.Contains(s, "_") { - check.errorf(lit, "underscores in numeric literals requires go1.13 or later") + check.error(lit, "underscores in numeric literals requires go1.13 or later") return } if s[0] != '0' { @@ -29,15 +29,15 @@ func (check *Checker) langCompat(lit *syntax.BasicLit) { } radix := s[1] if radix == 'b' || radix == 'B' { - check.errorf(lit, "binary literals requires go1.13 or later") + check.error(lit, "binary literals requires go1.13 or later") return } if radix == 'o' || radix == 'O' { - check.errorf(lit, "0o/0O-style octal literals requires go1.13 or later") + check.error(lit, "0o/0O-style octal literals requires go1.13 or later") return } if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') { - check.errorf(lit, "hexadecimal floating-point literals requires go1.13 or later") + check.error(lit, "hexadecimal floating-point literals requires go1.13 or later") } } diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index 230b544148e..6d697a53ae3 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -157,15 +157,7 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { t := r.X.Type() fast := mapfast(t) - var key ir.Node - if fast != mapslow { - // fast versions take key by value - key = r.Index - } else { - // standard version takes key by reference - // order.expr made sure key is addressable. 
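The universe.go hunk above predeclares the two unsafe builtins new in Go 1.17, unsafe.Add and unsafe.Slice, each taking two operands; their lowering (walkUnsafeSlice and the OUNSAFEADD/OUNSAFESLICE cases) appears in the walk diffs below. Typical use, as a sketch:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	a := [4]int{1, 2, 3, 4}

	// unsafe.Slice(ptr, len) builds a []int over a's storage; the compiler
	// emits a length check (runtime.unsafeslice*), a nil check on ptr, and
	// a slice-header construction, as walkUnsafeSlice below shows.
	s := unsafe.Slice(&a[0], len(a))
	fmt.Println(s) // [1 2 3 4]

	// unsafe.Add performs pointer arithmetic: &a[2] computed from &a[0].
	p := (*int)(unsafe.Add(unsafe.Pointer(&a[0]), 2*unsafe.Sizeof(a[0])))
	fmt.Println(*p) // 3
}
```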
- key = typecheck.NodAddr(r.Index) - } + key := mapKeyArg(fast, r, r.Index) // from: // a,b = m[i] @@ -176,10 +168,10 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { var call *ir.CallExpr if w := t.Elem().Width; w <= zeroValSize { - fn := mapfn(mapaccess2[fast], t) + fn := mapfn(mapaccess2[fast], t, false) call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key) } else { - fn := mapfn("mapaccess2_fat", t) + fn := mapfn("mapaccess2_fat", t, true) z := reflectdata.ZeroAddr(w) call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z) } @@ -270,7 +262,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { } res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH) - res.Offset = base.Ctxt.FixedFrameSize() + r.Offset + res.Index = int64(i) res.SetType(r.Type) res.SetTypecheck(1) @@ -330,6 +322,13 @@ func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node { // Save subexpressions needed on left side. // Drill through non-dereferences. for { + // If an expression has init statements, they must be evaluated + // before any of its saved sub-operands (#45706). + // TODO(mdempsky): Disallow init statements on lvalues. + init := ir.TakeInit(l) + walkStmtList(init) + early.Append(init...) + switch ll := l.(type) { case *ir.IndexExpr: if ll.X.Type().IsArray() { @@ -558,7 +557,7 @@ func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { return s } -// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...). +// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...). // isAppendOfMake assumes n has already been typechecked. func isAppendOfMake(n ir.Node) bool { if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 97f9de9c1df..62eb4298f4d 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -214,10 +214,7 @@ func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node { t := map_.Type() fast := mapfast(t) - if fast == mapslow { - // order.stmt made sure key is addressable. - key = typecheck.NodAddr(key) - } + key = mapKeyArg(fast, n, key) return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key) } @@ -424,16 +421,13 @@ func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node { fnname = "makeslice" argtype = types.Types[types.TINT] } - - m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - m.SetType(t) - fn := typecheck.LookupRuntime(fnname) - m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) - m.Ptr.MarkNonNil() - m.Len = typecheck.Conv(len, types.Types[types.TINT]) - m.Cap = typecheck.Conv(cap, types.Types[types.TINT]) - return walkExpr(typecheck.Expr(m), init) + ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) + ptr.MarkNonNil() + len = typecheck.Conv(len, types.Types[types.TINT]) + cap = typecheck.Conv(cap, types.Types[types.TINT]) + sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, len, cap) + return walkExpr(typecheck.Expr(sh), init) } // walkMakeSliceCopy walks an OMAKESLICECOPY node. 
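walkMakeSlice above is simplified to build the slice header through the five-argument ir.NewSliceHeaderExpr constructor instead of mutating an empty node; the same cleanup is applied next to walkMakeSliceCopy, the lowering behind the make+copy pattern. That pattern in user code, for reference:

```go
package main

import "fmt"

func main() {
	src := []byte("hello")
	// make followed immediately by a full copy is recognized as
	// OMAKESLICECOPY and lowered to one runtime.makeslicecopy call,
	// so bytes that are about to be overwritten by the copy need not
	// be zeroed first.
	dst := make([]byte, len(src))
	copy(dst, src)
	fmt.Println(string(dst)) // hello
}
```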
@@ -462,12 +456,9 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer fn := typecheck.LookupRuntime("mallocgc") - sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false)) - sh.Ptr.MarkNonNil() - sh.Len = length - sh.Cap = length - sh.SetType(t) + ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false)) + ptr.MarkNonNil() + sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length) s := typecheck.Temp(t) r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh)) @@ -485,13 +476,10 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // Replace make+copy with runtime.makeslicecopy. // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer fn := typecheck.LookupRuntime("makeslicecopy") - s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil) - s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) - s.Ptr.MarkNonNil() - s.Len = length - s.Cap = length - s.SetType(t) - return walkExpr(typecheck.Expr(s), init) + ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) + ptr.MarkNonNil() + sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length) + return walkExpr(typecheck.Expr(sh), init) } // walkNew walks an ONEW node. @@ -653,6 +641,57 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { return walkStmt(typecheck.Stmt(r)) } +// walkRecover walks an ORECOVER node. +func walkRecover(nn *ir.CallExpr, init *ir.Nodes) ir.Node { + // Call gorecover with the FP of this frame. + // FP is equal to caller's SP plus FixedFrameSize(). + var fp ir.Node = mkcall("getcallersp", types.Types[types.TUINTPTR], init) + if off := base.Ctxt.FixedFrameSize(); off != 0 { + fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off)) + } + fp = ir.NewConvExpr(fp.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp) + return mkcall("gorecover", nn.Type(), init, fp) +} + +func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + len := safeExpr(n.Y, init) + + fnname := "unsafeslice64" + argtype := types.Types[types.TINT64] + + // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. + // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in unsafeslice during runtime. + if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() { + fnname = "unsafeslice" + argtype = types.Types[types.TINT] + } + + t := n.Type() + + // Call runtime.unsafeslice[64] to check that the length argument is + // non-negative and smaller than the max length allowed for the + // element type. + fn := typecheck.LookupRuntime(fnname) + init.Append(mkcall1(fn, nil, init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype))) + + ptr := walkExpr(n.X, init) + + c := ir.NewUnaryExpr(n.Pos(), ir.OCHECKNIL, ptr) + c.SetTypecheck(1) + init.Append(c) + + // TODO(mdempsky): checkptr instrumentation. Maybe merge into length + // check above, along with nil check? Need to be careful about + // notinheap pointers though: can't pass them as unsafe.Pointer. 
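The new walkRecover above computes the caller's frame pointer explicitly as getcallersp() plus FixedFrameSize() and passes it to runtime.gorecover; the ORECOVER case in walk/expr.go (further down) now calls it instead of hard-wiring ir.RegFP, which keeps the lowering correct when the call is later wrapped for the register ABI (see the "defer recover()" special case in order.go below). Source-level semantics are unchanged:

```go
package main

import "fmt"

// recover() compiles to a gorecover call carrying the deferred function's
// frame pointer, which lets the runtime verify that recover is invoked
// directly from a deferred function.
func safeDiv(a, b int) (q int, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("recovered: %v", r)
		}
	}()
	return a / b, nil
}

func main() {
	_, err := safeDiv(1, 0)
	fmt.Println(err) // recovered: runtime error: integer divide by zero
}
```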
+ h := ir.NewSliceHeaderExpr(n.Pos(), t, + typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]), + typecheck.Conv(len, types.Types[types.TINT]), + typecheck.Conv(len, types.Types[types.TINT])) + return walkExpr(typecheck.Expr(h), init) +} + func badtype(op ir.Op, tl, tr *types.Type) { var s string if tl != nil { diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index d7d61058167..2194e1c5b0c 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -37,6 +37,14 @@ func directClosureCall(n *ir.CallExpr) { return // leave for walkClosure to handle } + // If wrapGoDefer() in the order phase has flagged this call, + // avoid eliminating the closure even if there is a direct call to it + // (the closure is needed to simplify the register ABI). See + // wrapGoDefer for more details. + if n.PreserveClosure { + return + } + // We are going to insert captured variables before input args. var params []*types.Field var decls []*ir.Name diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go index f4b5387c061..b18615f61a3 100644 --- a/src/cmd/compile/internal/walk/compare.go +++ b/src/cmd/compile/internal/walk/compare.go @@ -426,6 +426,7 @@ func eqFor(t *types.Type) (n ir.Node, needsize bool) { return n, true case types.ASPECIAL: sym := reflectdata.TypeSymPrefix(".eq", t) + // TODO(austin): This creates an ir.Name with a nil Func. n := typecheck.NewName(sym) ir.MarkFunc(n) n.SetType(types.NewSignature(types.NoPkg, nil, nil, []*types.Field{ diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go index 73442dc404c..abd920d6461 100644 --- a/src/cmd/compile/internal/walk/complit.go +++ b/src/cmd/compile/internal/walk/complit.go @@ -55,11 +55,12 @@ func (c initContext) String() string { return "inNonInitFunction" } -// readonlystaticname returns a name backed by a (writable) static data symbol. +// readonlystaticname returns a name backed by a read-only static data symbol.
func readonlystaticname(t *types.Type) *ir.Name { n := staticinit.StaticName(t) n.MarkReadonly() n.Linksym().Set(obj.AttrContentAddressable, true) + n.Linksym().Set(obj.AttrLocal, true) return n } @@ -474,7 +475,10 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0)) cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(tk.NumElem())) incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1))) - body := ir.NewAssignStmt(base.Pos, lhs, rhs) + + var body ir.Node = ir.NewAssignStmt(base.Pos, lhs, rhs) + body = typecheck.Stmt(body) // typechecker rewrites OINDEX to OINDEXMAP + body = orderStmtInPlace(body, map[string][]*ir.Name{}) loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil) loop.Body = []ir.Node{body} @@ -502,7 +506,10 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem)) ir.SetPos(tmpelem) - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem)) + var a ir.Node = ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem) + a = typecheck.Stmt(a) // typechecker rewrites OINDEX to OINDEXMAP + a = orderStmtInPlace(a, map[string][]*ir.Name{}) + appendWalkStmt(init, a) } appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpkey)) diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go index fa8e2c0bb8d..26e17a126f2 100644 --- a/src/cmd/compile/internal/walk/convert.go +++ b/src/cmd/compile/internal/walk/convert.go @@ -135,7 +135,7 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { return e } - fnname, needsaddr := convFuncName(fromType, toType) + fnname, argType, needsaddr := convFuncName(fromType, toType) if !needsaddr && !fromType.IsInterface() { // Use a specialized conversion routine that only returns a data pointer. @@ -143,10 +143,29 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node { // e = iface{typ/tab, ptr} fn := typecheck.LookupRuntime(fnname) types.CalcSize(fromType) - fn = typecheck.SubstArgTypes(fn, fromType) - types.CalcSize(fn.Type()) + + arg := n.X + switch { + case fromType == argType: + // already in the right type, nothing to do + case fromType.Kind() == argType.Kind(), + fromType.IsPtrShaped() && argType.IsPtrShaped(): + // can directly convert (e.g. named type to underlying type, or one pointer to another) + arg = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, argType, arg) + case fromType.IsInteger() && argType.IsInteger(): + // can directly convert (e.g. int32 to uint32) + arg = ir.NewConvExpr(n.Pos(), ir.OCONV, argType, arg) + default: + // unsafe cast through memory + arg = copyExpr(arg, arg.Type(), init) + var addr ir.Node = typecheck.NodAddr(arg) + addr = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, argType.PtrTo(), addr) + arg = ir.NewStarExpr(n.Pos(), addr) + arg.SetType(argType) + } + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) - call.Args = []ir.Node{n.X} + call.Args = []ir.Node{arg} e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeExpr(walkExpr(typecheck.Expr(call), init), init)) e.SetType(toType) e.SetTypecheck(1) @@ -294,44 +313,45 @@ func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { } // convFuncName builds the runtime function name for interface conversion. -// It also reports whether the function expects the data by address. 
+// It also returns the argument type that the runtime function takes, and +// whether the function expects the data by address. // Not all names are possible. For example, we never generate convE2E or convE2I. -func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) { +func convFuncName(from, to *types.Type) (fnname string, argType *types.Type, needsaddr bool) { tkind := to.Tie() switch from.Tie() { case 'I': if tkind == 'I' { - return "convI2I", false + return "convI2I", types.Types[types.TINTER], false } case 'T': switch { case from.Size() == 2 && from.Align == 2: - return "convT16", false + return "convT16", types.Types[types.TUINT16], false case from.Size() == 4 && from.Align == 4 && !from.HasPointers(): - return "convT32", false + return "convT32", types.Types[types.TUINT32], false case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers(): - return "convT64", false + return "convT64", types.Types[types.TUINT64], false } if sc := from.SoleComponent(); sc != nil { switch { case sc.IsString(): - return "convTstring", false + return "convTstring", types.Types[types.TSTRING], false case sc.IsSlice(): - return "convTslice", false + return "convTslice", types.NewSlice(types.Types[types.TUINT8]), false // the element type doesn't matter } } switch tkind { case 'E': if !from.HasPointers() { - return "convT2Enoptr", true + return "convT2Enoptr", types.Types[types.TUNSAFEPTR], true } - return "convT2E", true + return "convT2E", types.Types[types.TUNSAFEPTR], true case 'I': if !from.HasPointers() { - return "convT2Inoptr", true + return "convT2Inoptr", types.Types[types.TUNSAFEPTR], true } - return "convT2I", true + return "convT2I", types.Types[types.TUNSAFEPTR], true } } base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie()) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 7b65db51006..2fb907710bb 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -7,6 +7,7 @@ package walk import ( "fmt" "go/constant" + "internal/buildcfg" "strings" "cmd/compile/internal/base" @@ -16,7 +17,6 @@ import ( "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" - "cmd/internal/objabi" ) // The result of walkExpr MUST be assigned back to n, e.g. 
@@ -117,12 +117,17 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { n.X = walkExpr(n.X, init) return n - case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH: + case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH, + ir.OUNSAFEADD: n := n.(*ir.BinaryExpr) n.X = walkExpr(n.X, init) n.Y = walkExpr(n.Y, init) return n + case ir.OUNSAFESLICE: + n := n.(*ir.BinaryExpr) + return walkUnsafeSlice(n, init) + case ir.ODOT, ir.ODOTPTR: n := n.(*ir.SelectorExpr) return walkDot(n, init) @@ -157,8 +162,7 @@ return mkcall("gopanic", nil, init, n.X) case ir.ORECOVER: - n := n.(*ir.CallExpr) - return mkcall("gorecover", n.Type(), init, typecheck.NodAddr(ir.RegFP)) + return walkRecover(n.(*ir.CallExpr), init) case ir.OCFUNC: return n @@ -206,6 +210,11 @@ n := n.(*ir.ConvExpr) return walkConv(n, init) + case ir.OSLICE2ARRPTR: + n := n.(*ir.ConvExpr) + n.X = walkExpr(n.X, init) + return n + case ir.ODIV, ir.OMOD: n := n.(*ir.BinaryExpr) return walkDivMod(n, init) @@ -493,6 +502,43 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { directClosureCall(n) } + if isFuncPCIntrinsic(n) { + // For internal/abi.FuncPCABIxxx(fn), if fn is a defined function, rewrite + // it to the address of the function for the ABI under which fn is defined. + name := n.X.(*ir.Name).Sym().Name + arg := n.Args[0] + var wantABI obj.ABI + switch name { + case "FuncPCABI0": + wantABI = obj.ABI0 + case "FuncPCABIInternal": + wantABI = obj.ABIInternal + } + if isIfaceOfFunc(arg) { + fn := arg.(*ir.ConvExpr).X.(*ir.Name) + abi := fn.Func.ABI + if abi != wantABI { + base.ErrorfAt(n.Pos(), "internal/abi.%s expects an %v function, %s is defined as %v", name, wantABI, fn.Sym().Name, abi) + } + var e ir.Node = ir.NewLinksymExpr(n.Pos(), fn.Sym().LinksymABI(abi), types.Types[types.TUINTPTR]) + e = ir.NewAddrExpr(n.Pos(), e) + e.SetType(types.Types[types.TUINTPTR].PtrTo()) + e = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, n.Type(), e) + return e + } + // fn is not a defined function. It must be ABIInternal. + // Read the address from func value, i.e. *(*uintptr)(idata(fn)). + if wantABI != obj.ABIInternal { + base.ErrorfAt(n.Pos(), "internal/abi.%s does not accept func expression, which is ABIInternal", name) + } + arg = walkExpr(arg, init) + var e ir.Node = ir.NewUnaryExpr(n.Pos(), ir.OIDATA, arg) + e.SetType(n.Type().PtrTo()) + e = ir.NewStarExpr(n.Pos(), e) + e.SetType(n.Type()) + return e + } + walkCall1(n, init) return n } @@ -670,6 +716,29 @@ func walkIndex(n *ir.IndexExpr, init *ir.Nodes) ir.Node { return n } +// mapKeyArg returns an expression for key that is suitable to be passed +// as the key argument for mapaccess and mapdelete functions. +// n is the map indexing or delete Node (to provide Pos). +// Note: this is not used for mapassign, which does distinguish pointer vs. +// integer key. +func mapKeyArg(fast int, n, key ir.Node) ir.Node { + switch fast { + case mapslow: + // standard version takes key by reference. + // order.expr made sure key is addressable. + return typecheck.NodAddr(key) + case mapfast32ptr: + // mapaccess and mapdelete don't distinguish pointer vs. integer key. + return ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.Types[types.TUINT32], key) + case mapfast64ptr: + // mapaccess and mapdelete don't distinguish pointer vs. integer key.
+ return ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.Types[types.TUINT64], key) + default: + // fast version takes key by value. + return key + } +} + // walkIndexMap walks an OINDEXMAP node. func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node { // Replace m[k] with *map{access1,assign}(maptype, m, &k) @@ -687,21 +756,16 @@ func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node { // order.expr made sure key is addressable. key = typecheck.NodAddr(key) } - call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key) + call = mkcall1(mapfn(mapassign[fast], t, false), nil, init, reflectdata.TypePtr(t), map_, key) } else { // m[k] is not the target of an assignment. fast := mapfast(t) - if fast == mapslow { - // standard version takes key by reference. - // order.expr made sure key is addressable. - key = typecheck.NodAddr(key) - } - + key = mapKeyArg(fast, n, key) if w := t.Elem().Width; w <= zeroValSize { - call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key) + call = mkcall1(mapfn(mapaccess1[fast], t, false), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key) } else { z := reflectdata.ZeroAddr(w) - call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z) + call = mkcall1(mapfn("mapaccess1_fat", t, true), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z) } } call.SetType(types.NewPtr(t.Elem())) @@ -924,7 +988,7 @@ func usemethod(n *ir.CallExpr) { } func usefield(n *ir.SelectorExpr) { - if objabi.Fieldtrack_enabled == 0 { + if !buildcfg.Experiment.FieldTrack { return } diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index fe0b6a0eff4..b733d3a29f6 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -6,6 +6,8 @@ package walk import ( "fmt" + "go/constant" + "internal/buildcfg" "cmd/compile/internal/base" "cmd/compile/internal/escape" @@ -268,10 +270,52 @@ func (o *orderState) addrTemp(n ir.Node) ir.Node { func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node { // Most map calls need to take the address of the key. // Exception: map*_fast* calls. See golang.org/issue/19015. - if mapfast(t) == mapslow { + alg := mapfast(t) + if alg == mapslow { return o.addrTemp(n) } - return n + var kt *types.Type + switch alg { + case mapfast32: + kt = types.Types[types.TUINT32] + case mapfast64: + kt = types.Types[types.TUINT64] + case mapfast32ptr, mapfast64ptr: + kt = types.Types[types.TUNSAFEPTR] + case mapfaststr: + kt = types.Types[types.TSTRING] + } + nt := n.Type() + switch { + case nt == kt: + return n + case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped(): + // can directly convert (e.g. named type to underlying type, or one pointer to another) + return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONVNOP, kt, n)) + case nt.IsInteger() && kt.IsInteger(): + // can directly convert (e.g. int32 to uint32) + if n.Op() == ir.OLITERAL && nt.IsSigned() { + // avoid constant overflow error + n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n) + n.SetType(kt) + return n + } + return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONV, kt, n)) + default: + // Unsafe cast through memory. + // We'll need to do a load with type kt. Create a temporary of type kt to + // ensure sufficient alignment. nt may be under-aligned. 
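mapKeyTemp above (together with mapKeyArg in walk/expr.go) makes the key expression's type match what the map*_fast32/_fast64/_faststr runtime routines actually take (uint32, uint64, string, unsafe.Pointer): keys of a compatible kind get an in-place conversion, and oddly shaped 4- or 8-byte keys are loaded through a properly aligned temporary, plausibly in support of the register-based calling convention work elsewhere in this patch. None of this is visible at the source level:

```go
package main

import "fmt"

// ID is a defined 64-bit key type; map operations on it still take the
// fast64 runtime paths, with the key converted (OCONVNOP) to the exact
// type the runtime helper expects and passed by value.
type ID int64

func main() {
	m := map[ID]string{42: "answer"}
	fmt.Println(m[42])  // mapaccess1_fast64
	delete(m, 42)       // mapdelete_fast64
	fmt.Println(len(m)) // 0
}
```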
+ if kt.Align < nt.Align { + base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt) + } + tmp := o.newTemp(kt, true) + // *(*nt)(&tmp) = n + var e ir.Node = typecheck.NodAddr(tmp) + e = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, nt.PtrTo(), e) + e = ir.NewStarExpr(n.Pos(), e) + o.append(ir.NewAssignStmt(base.Pos, e, n)) + return tmp + } } // mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP @@ -500,6 +544,14 @@ func (o *orderState) call(nn ir.Node) { n := nn.(*ir.CallExpr) typecheck.FixVariadicCall(n) + + if isFuncPCIntrinsic(n) && isIfaceOfFunc(n.Args[0]) { + // For internal/abi.FuncPCABIxxx(fn), if fn is a defined function, + // do not introduce temporaries here, so it is easier to rewrite it + // to symbol address reference later in walk. + return + } + n.X = o.expr(n.X, nil) o.exprList(n.Args) @@ -731,6 +783,16 @@ func (o *orderState) stmt(n ir.Node) { t := o.markTemp() o.init(n.Call) o.call(n.Call) + if n.Call.Op() == ir.ORECOVER { + // Special handling of "defer recover()". We need to evaluate the FP + // argument before wrapping. + var init ir.Nodes + n.Call = walkRecover(n.Call.(*ir.CallExpr), &init) + o.stmtList(init) + } + if buildcfg.Experiment.RegabiDefer { + o.wrapGoDefer(n) + } o.out = append(o.out, n) o.cleanTemp(t) @@ -1136,7 +1198,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { if n.X.Type().IsInterface() { return n } - if _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) { + if _, _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) { // Need a temp if we need to pass the address to the conversion function. // We also process static composite literal node here, making a named static global // whose address we can put directly in an interface (see OCONVIFACE case in walk). @@ -1435,3 +1497,325 @@ func (o *orderState) as2ok(n *ir.AssignListStmt) { o.out = append(o.out, n) o.stmt(typecheck.Stmt(as)) } + +var wrapGoDefer_prgen int + +// wrapGoDefer wraps the target of a "go" or "defer" statement with a +// new "function with no arguments" closure. Specifically, it converts +// +// defer f(x, y) +// +// to +// +// x1, y1 := x, y +// defer func() { f(x1, y1) }() +// +// This is primarily to enable a quicker bringup of defers under the +// new register ABI; by doing this conversion, we can simplify the +// code in the runtime that invokes defers on the panic path. +func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) { + call := n.Call + + var callX ir.Node // thing being called + var callArgs []ir.Node // call arguments + var keepAlive []*ir.Name // KeepAlive list from call, if present + + // A helper to recreate the call within the closure. + var mkNewCall func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node + + // Defer calls come in many shapes and sizes; not all of them + // are ir.CallExpr's. Examine the type to see what we're dealing with. 
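wrapGoDefer above implements the transformation documented in its comment: under the regabi-defer experiment, a go or defer target that carries arguments, a receiver, or results is rewritten into a call of an argument-less closure, so the runtime's defer and panic paths never have to materialize call arguments themselves. At the source level the rewrite corresponds to:

```go
package main

import "fmt"

func main() {
	x, y := 1, 2
	// The compiler rewrites this statement into (roughly):
	//   x1, y1 := x, y
	//   defer func() { fmt.Println(x1, y1) }()
	// The arguments are captured at the point of the defer statement...
	defer fmt.Println(x, y)
	// ...so this reassignment does not affect the deferred call:
	// it still prints "1 2" when main returns.
	x, y = 10, 20
}
```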
+ switch x := call.(type) { + case *ir.CallExpr: + callX = x.X + callArgs = x.Args + keepAlive = x.KeepAlive + mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node { + newcall := ir.NewCallExpr(pos, op, fun, args) + newcall.IsDDD = x.IsDDD + return ir.Node(newcall) + } + case *ir.UnaryExpr: // ex: OCLOSE + callArgs = []ir.Node{x.X} + mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node { + if len(args) != 1 { + panic("internal error, expecting single arg") + } + return ir.Node(ir.NewUnaryExpr(pos, op, args[0])) + } + case *ir.BinaryExpr: // ex: OCOPY + callArgs = []ir.Node{x.X, x.Y} + mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node { + if len(args) != 2 { + panic("internal error, expecting two args") + } + return ir.Node(ir.NewBinaryExpr(pos, op, args[0], args[1])) + } + default: + panic("unhandled op") + } + + // No need to wrap if called func has no args, no receiver, and no results. + // However in the case of "defer func() { ... }()" we need to + // protect against the possibility of directClosureCall rewriting + // things so that the call does have arguments. + // + // Do wrap method calls (OCALLMETH, OCALLINTER), because it has + // a receiver. + // + // Also do wrap builtin functions, because they may be expanded to + // calls with arguments (e.g. ORECOVER). + // + // TODO: maybe not wrap if the called function has no arguments and + // only in-register results? + if len(callArgs) == 0 && call.Op() == ir.OCALLFUNC && callX.Type().NumResults() == 0 { + if c, ok := call.(*ir.CallExpr); ok && callX != nil && callX.Op() == ir.OCLOSURE { + cloFunc := callX.(*ir.ClosureExpr).Func + cloFunc.SetClosureCalled(false) + c.PreserveClosure = true + } + return + } + + if c, ok := call.(*ir.CallExpr); ok { + // To simplify things, turn f(a, b, []T{c, d, e}...) back + // into f(a, b, c, d, e) -- when the final call is run through the + // type checker below, it will rebuild the proper slice literal. + undoVariadic(c) + callX = c.X + callArgs = c.Args + } + + // This is set to true if the closure we're generating escapes + // (needs heap allocation). + cloEscapes := func() bool { + if n.Op() == ir.OGO { + // For "go", assume that all closures escape. + return true + } + // For defer, just use whatever result escape analysis + // has determined for the defer. + return n.Esc() != ir.EscNever + }() + + // A helper for making a copy of an argument. Note that it is + // not safe to use o.copyExpr(arg) if we're putting a + // reference to the temp into the closure (as opposed to + // copying it in by value), since in the by-reference case we + // need a temporary whose lifetime extends to the end of the + // function (as opposed to being local to the current block or + // statement being ordered). + mkArgCopy := func(arg ir.Node) *ir.Name { + t := arg.Type() + byval := t.Size() <= 128 || cloEscapes + var argCopy *ir.Name + if byval { + argCopy = o.copyExpr(arg) + } else { + argCopy = typecheck.Temp(t) + o.append(ir.NewAssignStmt(base.Pos, argCopy, arg)) + } + // The value of 128 below is meant to be consistent with code + // in escape analysis that picks byval/byaddr based on size. + argCopy.SetByval(byval) + return argCopy + } + + // getUnsafeArg looks for an unsafe.Pointer arg that has been + // previously captured into the call's keepalive list, returning + // the name node for it if found. 
+ getUnsafeArg := func(arg ir.Node) *ir.Name { + // Look for uintptr(unsafe.Pointer(name)) + if arg.Op() != ir.OCONVNOP { + return nil + } + if !arg.Type().IsUintptr() { + return nil + } + if !arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() { + return nil + } + arg = arg.(*ir.ConvExpr).X + argname, ok := arg.(*ir.Name) + if !ok { + return nil + } + for i := range keepAlive { + if argname == keepAlive[i] { + return argname + } + } + return nil + } + + // Copy the arguments to the function into temps. + // + // For calls with uintptr(unsafe.Pointer(...)) args that are being + // kept alive (see code in (*orderState).call that does this), use + // the existing arg copy instead of creating a new copy. + unsafeArgs := make([]*ir.Name, len(callArgs)) + origArgs := callArgs + var newNames []*ir.Name + for i := range callArgs { + arg := callArgs[i] + var argname *ir.Name + unsafeArgName := getUnsafeArg(arg) + if unsafeArgName != nil { + // arg has been copied already, use keepalive copy + argname = unsafeArgName + unsafeArgs[i] = unsafeArgName + } else { + argname = mkArgCopy(arg) + } + newNames = append(newNames, argname) + } + + // Deal with cases where the function expression (what we're + // calling) is not a simple function symbol. + var fnExpr *ir.Name + var methSelectorExpr *ir.SelectorExpr + if callX != nil { + switch { + case callX.Op() == ir.ODOTMETH || callX.Op() == ir.ODOTINTER: + // Handle defer of a method call, e.g. "defer v.MyMethod(x, y)" + n := callX.(*ir.SelectorExpr) + n.X = mkArgCopy(n.X) + methSelectorExpr = n + if callX.Op() == ir.ODOTINTER { + // Currently for "defer i.M()" if i is nil it panics at the + // point of defer statement, not when deferred function is called. + // (I think there is an issue discussing what is the intended + // behavior but I cannot find it.) + // We need to do the nil check outside of the wrapper. + tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X)) + c := ir.NewUnaryExpr(n.Pos(), ir.OCHECKNIL, tab) + c.SetTypecheck(1) + o.append(c) + } + case !(callX.Op() == ir.ONAME && callX.(*ir.Name).Class == ir.PFUNC): + // Deal with "defer returnsafunc()(x, y)" (for + // example) by copying the callee expression. + fnExpr = mkArgCopy(callX) + if callX.Op() == ir.OCLOSURE { + // For "defer func(...)", in addition to copying the + // closure into a temp, mark it as no longer directly + // called. + callX.(*ir.ClosureExpr).Func.SetClosureCalled(false) + } + } + } + + // Create a new no-argument function that we'll hand off to defer. + var noFuncArgs []*ir.Field + noargst := ir.NewFuncType(base.Pos, nil, noFuncArgs, nil) + wrapGoDefer_prgen++ + outerfn := ir.CurFunc + wrapname := fmt.Sprintf("%v·dwrap·%d", outerfn, wrapGoDefer_prgen) + sym := types.LocalPkg.Lookup(wrapname) + fn := typecheck.DeclFunc(sym, noargst) + fn.SetIsHiddenClosure(true) + fn.SetWrapper(true) + + // helper for capturing reference to a var declared in an outer scope. + capName := func(pos src.XPos, fn *ir.Func, n *ir.Name) *ir.Name { + t := n.Type() + cv := ir.CaptureName(pos, fn, n) + cv.SetType(t) + return typecheck.Expr(cv).(*ir.Name) + } + + // Call args (x1, y1) need to be captured as part of the newly + // created closure. 
+ newCallArgs := []ir.Node{} + for i := range newNames { + var arg ir.Node + arg = capName(callArgs[i].Pos(), fn, newNames[i]) + if unsafeArgs[i] != nil { + arg = ir.NewConvExpr(arg.Pos(), origArgs[i].Op(), origArgs[i].Type(), arg) + } + newCallArgs = append(newCallArgs, arg) + } + // Also capture the function or method expression (if needed) into + // the closure. + if fnExpr != nil { + callX = capName(callX.Pos(), fn, fnExpr) + } + if methSelectorExpr != nil { + methSelectorExpr.X = capName(callX.Pos(), fn, methSelectorExpr.X.(*ir.Name)) + } + ir.FinishCaptureNames(n.Pos(), outerfn, fn) + + // This flags a builtin as opposed to a regular call. + irregular := (call.Op() != ir.OCALLFUNC && + call.Op() != ir.OCALLMETH && + call.Op() != ir.OCALLINTER) + + // Construct new function body: f(x1, y1) + op := ir.OCALL + if irregular { + op = call.Op() + } + newcall := mkNewCall(call.Pos(), op, callX, newCallArgs) + + // Type-check the result. + if !irregular { + typecheck.Call(newcall.(*ir.CallExpr)) + } else { + typecheck.Stmt(newcall) + } + + // Finalize body, register function on the main decls list. + fn.Body = []ir.Node{newcall} + typecheck.FinishFuncBody() + typecheck.Func(fn) + typecheck.Target.Decls = append(typecheck.Target.Decls, fn) + + // Create closure expr + clo := ir.NewClosureExpr(n.Pos(), fn) + fn.OClosure = clo + clo.SetType(fn.Type()) + + // Set escape properties for closure. + if n.Op() == ir.OGO { + // For "go", assume that the closure is going to escape + // (with an exception for the runtime, which doesn't + // permit heap-allocated closures). + if base.Ctxt.Pkgpath != "runtime" { + clo.SetEsc(ir.EscHeap) + } + } else { + // For defer, just use whatever result escape analysis + // has determined for the defer. + if n.Esc() == ir.EscNever { + clo.SetTransient(true) + clo.SetEsc(ir.EscNone) + } + } + + // Create new top level call to closure over argless function. + topcall := ir.NewCallExpr(n.Pos(), ir.OCALL, clo, []ir.Node{}) + typecheck.Call(topcall) + + // Tag the call to ensure that directClosureCall doesn't undo our work. + topcall.PreserveClosure = true + + fn.SetClosureCalled(false) + + // Finally, point the defer statement at the newly generated call. + n.Call = topcall +} + +// isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions. +func isFuncPCIntrinsic(n *ir.CallExpr) bool { + if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME { + return false + } + fn := n.X.(*ir.Name).Sym() + return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") && + (fn.Pkg.Path == "internal/abi" || fn.Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "internal/abi") +} + +// isIfaceOfFunc returns whether n is an interface conversion from a direct reference of a func.
+func isIfaceOfFunc(n ir.Node) bool { + return n.Op() == ir.OCONVIFACE && n.(*ir.ConvExpr).X.Op() == ir.ONAME && n.(*ir.ConvExpr).X.(*ir.Name).Class == ir.PFUNC +} diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go index 47cd2fdc222..859e5c57f06 100644 --- a/src/cmd/compile/internal/walk/race.go +++ b/src/cmd/compile/internal/walk/race.go @@ -7,11 +7,8 @@ package walk import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/ssagen" - "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" - "cmd/internal/sys" ) func instrument(fn *ir.Func) { @@ -26,26 +23,12 @@ func instrument(fn *ir.Func) { if base.Flag.Race { lno := base.Pos base.Pos = src.NoXPos - if ssagen.Arch.LinkArch.Arch.Family != sys.AMD64 { - fn.Enter.Prepend(mkcallstmt("racefuncenterfp")) - fn.Exit.Append(mkcallstmt("racefuncexit")) - } else { - - // nodpc is the PC of the caller as extracted by - // getcallerpc. We use -widthptr(FP) for x86. - // This only works for amd64. This will not - // work on arm or others that might support - // race in the future. - - nodpc := ir.NewNameAt(src.NoXPos, typecheck.Lookup(".fp")) - nodpc.Class = ir.PPARAM - nodpc.SetUsed(true) - nodpc.SetType(types.Types[types.TUINTPTR]) - nodpc.SetFrameOffset(int64(-types.PtrSize)) - fn.Dcl = append(fn.Dcl, nodpc) - fn.Enter.Prepend(mkcallstmt("racefuncenter", nodpc)) - fn.Exit.Append(mkcallstmt("racefuncexit")) + var init ir.Nodes + fn.Enter.Prepend(mkcallstmt("racefuncenter", mkcall("getcallerpc", types.Types[types.TUINTPTR], &init))) + if len(init) != 0 { + base.Fatalf("race walk: unexpected init for getcallerpc") } + fn.Exit.Append(mkcallstmt("racefuncexit")) base.Pos = lno } } diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 5ab24b21884..b1169fdae8a 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -118,9 +118,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { tmp.SetBounded(true) // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". - a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.Lhs = []ir.Node{v1, v2} - a.Rhs = []ir.Node{hv1, tmp} + a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1, tmp}) body = []ir.Node{a} break } @@ -148,9 +146,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { // Use OAS2 to correctly handle assignments // of the form "v1, a[v1] := range". - a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.Lhs = []ir.Node{v1, v2} - a.Rhs = []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)} + a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)}) body = append(body, a) // Advance pointer as part of the late increment. @@ -168,7 +164,9 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { hit := nrange.Prealloc th := hit.Type() - keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:MapIterType + // depends on layout of iterator struct. 
+ // See cmd/compile/internal/reflectdata/reflect.go:MapIterType + keysym := th.Field(0).Sym elemsym := th.Field(1).Sym // ditto fn := typecheck.LookupRuntime("mapiterinit") @@ -188,9 +186,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)} } else { elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym)) - a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.Lhs = []ir.Node{v1, v2} - a.Rhs = []ir.Node{key, elem} + a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{key, elem}) body = []ir.Node{a} } @@ -206,10 +202,10 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { hb := typecheck.Temp(types.Types[types.TBOOL]) nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false)) - a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil) + lhs := []ir.Node{hv1, hb} + rhs := []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)} + a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, lhs, rhs) a.SetTypecheck(1) - a.Lhs = []ir.Node{hv1, hb} - a.Rhs = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)} nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond) if v1 == nil { body = nil @@ -268,24 +264,18 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))} // } else { - eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - // hv2, hv1 = decoderune(ha, hv1) - eif.Lhs = []ir.Node{hv2, hv1} fn := typecheck.LookupRuntime("decoderune") - var fnInit ir.Nodes - eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, ha, hv1)} - fnInit.Append(eif) - nif.Else = fnInit + call := mkcall1(fn, fn.Type().Results(), &nif.Else, ha, hv1) + a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{hv2, hv1}, []ir.Node{call}) + nif.Else.Append(a) body = append(body, nif) if v1 != nil { if v2 != nil { // v1, v2 = hv1t, hv2 - a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) - a.Lhs = []ir.Node{v1, v2} - a.Rhs = []ir.Node{hv1t, hv2} + a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1t, hv2}) body = append(body, a) } else { // v1 = hv1t @@ -431,7 +421,6 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node { // i = len(a) - 1 // } n := ir.NewIfStmt(base.Pos, nil, nil, nil) - n.Body = nil n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0)) // hp = &a[0] diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go index 873be289dcc..d2b67ddf55a 100644 --- a/src/cmd/compile/internal/walk/select.go +++ b/src/cmd/compile/internal/walk/select.go @@ -106,7 +106,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { ir.SetPos(n) r := ir.NewIfStmt(base.Pos, nil, nil, nil) *r.PtrInit() = cas.Init() - var call ir.Node + var cond ir.Node switch n.Op() { default: base.Fatalf("select %v", n.Op()) @@ -115,7 +115,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { // if selectnbsend(c, v) { body } else { default body } n := n.(*ir.SendStmt) ch := n.Chan - call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value) + cond = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value) case ir.OSELRECV2: n := n.(*ir.AssignListStmt) @@ -125,18 +125,14 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node { if ir.IsBlank(elem) { elem = typecheck.NodNil() } - if ir.IsBlank(n.Lhs[1]) { - // if 
selectnbrecv(&v, c) { body } else { default body }
-			call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
-		} else {
-			// TODO(cuonglm): make this use selectnbrecv()
-			// if selectnbrecv2(&v, &received, c) { body } else { default body }
-			receivedp := typecheck.Expr(typecheck.NodAddr(n.Lhs[1]))
-			call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
-		}
+		cond = typecheck.Temp(types.Types[types.TBOOL])
+		fn := chanfn("selectnbrecv", 2, ch.Type())
+		call := mkcall1(fn, fn.Type().Results(), r.PtrInit(), elem, ch)
+		as := ir.NewAssignListStmt(r.Pos(), ir.OAS2, []ir.Node{cond, n.Lhs[1]}, []ir.Node{call})
+		r.PtrInit().Append(typecheck.Stmt(as))
 	}
 
-	r.Cond = typecheck.Expr(call)
+	r.Cond = typecheck.Expr(cond)
 	r.Body = cas.Body
 	r.Else = append(dflt.Init(), dflt.Body...)
 	return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)}
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
index 46a621c2ba7..0bf76680c46 100644
--- a/src/cmd/compile/internal/walk/stmt.go
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -197,11 +197,7 @@ func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
 	case ir.ODELETE:
 		call := call.(*ir.CallExpr)
-		if mapfast(call.Args[0].Type()) == mapslow {
-			n.Call = wrapCall(call, &init)
-		} else {
-			n.Call = walkExpr(call, &init)
-		}
+		n.Call = wrapCall(call, &init)
 
 	case ir.OCOPY:
 		call := call.(*ir.BinaryExpr)
@@ -233,6 +229,24 @@ func walkIf(n *ir.IfStmt) ir.Node {
 	return n
 }
 
+// Rewrite
+//	go builtin(x, y, z)
+// into
+//	go func(a1, a2, a3) {
+//		builtin(a1, a2, a3)
+//	}(x, y, z)
+// for print, println, and delete.
+//
+// Rewrite
+//	go f(x, y, uintptr(unsafe.Pointer(z)))
+// into
+//	go func(a1, a2, a3) {
+//		f(a1, a2, uintptr(a3))
+//	}(x, y, unsafe.Pointer(z))
+// for functions containing unsafe-uintptr arguments.
+
+var wrapCall_prgen int
+
 // The result of wrapCall MUST be assigned back to n, e.g.
 //	n.Left = wrapCall(n.Left, init)
 func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
@@ -245,23 +259,25 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 	// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
 	if !isBuiltinCall && n.IsDDD {
-		last := len(n.Args) - 1
-		if va := n.Args[last]; va.Op() == ir.OSLICELIT {
-			va := va.(*ir.CompLitExpr)
-			n.Args = append(n.Args[:last], va.List...)
-			n.IsDDD = false
-		}
+		undoVariadic(n)
+	}
+
+	wrapArgs := n.Args
+	// If there's a receiver argument, it needs to be passed through the wrapper too.
+	if n.Op() == ir.OCALLMETH || n.Op() == ir.OCALLINTER {
+		recv := n.X.(*ir.SelectorExpr).X
+		wrapArgs = append([]ir.Node{recv}, wrapArgs...)
+	}
 
 	// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
-	origArgs := make([]ir.Node, len(n.Args))
+	origArgs := make([]ir.Node, len(wrapArgs))
 	var funcArgs []*ir.Field
-	for i, arg := range n.Args {
+	for i, arg := range wrapArgs {
 		s := typecheck.LookupNum("a", i)
 		if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
 			origArgs[i] = arg
 			arg = arg.(*ir.ConvExpr).X
-			n.Args[i] = arg
+			wrapArgs[i] = arg
 		}
 		funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type()))
 	}
@@ -278,6 +294,12 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
 		}
 		args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
 	}
+	if n.Op() == ir.OCALLMETH || n.Op() == ir.OCALLINTER {
+		// Move wrapped receiver argument back to its appropriate place.
+ recv := typecheck.Expr(args[0]) + n.X.(*ir.SelectorExpr).X = recv + args = args[1:] + } call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args) if !isBuiltinCall { call.SetOp(ir.OCALL) @@ -291,6 +313,25 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { typecheck.Stmts(fn.Body) typecheck.Target.Decls = append(typecheck.Target.Decls, fn) - call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args) + call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, wrapArgs) return walkExpr(typecheck.Stmt(call), init) } + +// undoVariadic turns a call to a variadic function of the form +// +// f(a, b, []T{c, d, e}...) +// +// back into +// +// f(a, b, c, d, e) +// +func undoVariadic(call *ir.CallExpr) { + if call.IsDDD { + last := len(call.Args) - 1 + if va := call.Args[last]; va.Op() == ir.OSLICELIT { + va := va.(*ir.CompLitExpr) + call.Args = append(call.Args[:last], va.List...) + call.IsDDD = false + } + } +} diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index b47d96dc4c9..fe2c62cd4f8 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -157,12 +157,16 @@ func chanfn(name string, n int, t *types.Type) ir.Node { return fn } -func mapfn(name string, t *types.Type) ir.Node { +func mapfn(name string, t *types.Type, isfat bool) ir.Node { if !t.IsMap() { base.Fatalf("mapfn %v", t) } fn := typecheck.LookupRuntime(name) - fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem()) + if mapfast(t) == mapslow || isfat { + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem()) + } else { + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Elem()) + } return fn } @@ -171,7 +175,11 @@ func mapfndel(name string, t *types.Type) ir.Node { base.Fatalf("mapfn %v", t) } fn := typecheck.LookupRuntime(name) - fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key()) + if mapfast(t) == mapslow { + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key()) + } else { + fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem()) + } return fn } @@ -237,24 +245,6 @@ func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { } } -// Rewrite -// go builtin(x, y, z) -// into -// go func(a1, a2, a3) { -// builtin(a1, a2, a3) -// }(x, y, z) -// for print, println, and delete. -// -// Rewrite -// go f(x, y, uintptr(unsafe.Pointer(z))) -// into -// go func(a1, a2, a3) { -// builtin(a1, a2, uintptr(a3)) -// }(x, y, unsafe.Pointer(z)) -// for function contains unsafe-uintptr arguments. - -var wrapCall_prgen int - // appendWalkStmt typechecks and walks stmt and then appends it to init. 
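The receiver handling in wrapCall above (thread the receiver through the wrapper as an extra leading argument, then move it back into receiver position) is easiest to see at the source level. A hedged sketch of the equivalent rewrite for a method call; the type `T`, its method `M`, and the WaitGroup plumbing are invented for the demo:

```go
package main

import (
	"fmt"
	"sync"
)

type T struct{ n int }

func (t T) M(a int) { fmt.Println(t.n + a) }

func main() {
	var wg sync.WaitGroup
	t := T{n: 1}

	// Source form:
	//	go t.M(2)
	//
	// Wrapper form, roughly: receiver first, then the original
	// arguments, all evaluated at the go statement.
	wg.Add(1)
	go func(a0 T, a1 int) {
		defer wg.Done()
		a0.M(a1)
	}(t, 2)

	wg.Wait() // demo-only synchronization
}
```

undoVariadic, defined just above, plays a supporting role here: a `go f(a, b, []T{c, d}...)` call is first flattened back to `go f(a, b, c, d)` so that each argument gets its own wrapper parameter.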
func appendWalkStmt(init *ir.Nodes, stmt ir.Node) { op := stmt.Op() @@ -318,7 +308,8 @@ func mayCall(n ir.Node) bool { default: base.FatalfAt(n.Pos(), "mayCall %+v", n) - case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER: + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, + ir.OUNSAFEADD, ir.OUNSAFESLICE: return true case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR, diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index e4ef9d7c6a8..31b09016eb9 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -14,7 +14,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/wasm" - "cmd/internal/objabi" + "internal/buildcfg" ) func Init(arch *ssagen.ArchInfo) { @@ -190,6 +190,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(storeOp(v.Type)) ssagen.AddrAuto(&p.To, v) + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. + default: if v.Type.IsMemory() { return @@ -322,7 +325,7 @@ func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) { case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S: getValue64(s, v.Args[0]) - if objabi.GOWASM.SatConv { + if buildcfg.GOWASM.SatConv { s.Prog(v.Op.Asm()) } else { if v.Op == ssa.OpWasmI64TruncSatF32S { @@ -334,7 +337,7 @@ func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) { case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U: getValue64(s, v.Args[0]) - if objabi.GOWASM.SatConv { + if buildcfg.GOWASM.SatConv { s.Prog(v.Op.Asm()) } else { if v.Op == ssa.OpWasmI64TruncSatF32U { diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go index fc806f91196..00a20e429f1 100644 --- a/src/cmd/compile/internal/x86/galign.go +++ b/src/cmd/compile/internal/x86/galign.go @@ -8,8 +8,8 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ssagen" "cmd/internal/obj/x86" - "cmd/internal/objabi" "fmt" + "internal/buildcfg" "os" ) @@ -19,7 +19,7 @@ func Init(arch *ssagen.ArchInfo) { arch.SSAGenValue = ssaGenValue arch.SSAGenBlock = ssaGenBlock arch.MAXWIDTH = (1 << 32) - 1 - switch v := objabi.GO386; v { + switch v := buildcfg.GO386; v { case "sse2": case "softfloat": arch.SoftFloat = true diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index 00dfa07bf78..a06fdbcb717 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -161,31 +161,19 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.Op386PXOR, ssa.Op386ADCL, ssa.Op386SBBL: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } - opregreg(s, v.Op.Asm(), r, v.Args[1].Reg()) + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry: // output 0 is carry/borrow, output 1 is the low 32 bits. - r := v.Reg0() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output[0] not in same register %s", v.LongString()) - } - opregreg(s, v.Op.Asm(), r, v.Args[1].Reg()) + opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg()) case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry: // output 0 is carry/borrow, output 1 is the low 32 bits. 
- r := v.Reg0() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output[0] not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg0() case ssa.Op386DIVL, ssa.Op386DIVW, ssa.Op386DIVLU, ssa.Op386DIVWU, @@ -306,20 +294,16 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // compute (x+y)/2 unsigned. // Do a 32-bit add, the overflow goes into the carry. // Shift right once and pull the carry back into the 31st bit. - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(x86.AADDL) p.From.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() p.From.Reg = v.Args[1].Reg() p = s.Prog(x86.ARCRL) p.From.Type = obj.TYPE_CONST p.From.Offset = 1 p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.Op386ADDLconst: r := v.Reg() @@ -358,7 +342,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = r - p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()}) + p.SetFrom3Reg(v.Args[0].Reg()) case ssa.Op386SUBLconst, ssa.Op386ADCLconst, @@ -370,15 +354,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssa.Op386SHRLconst, ssa.Op386SHRWconst, ssa.Op386SHRBconst, ssa.Op386SARLconst, ssa.Op386SARWconst, ssa.Op386SARBconst, ssa.Op386ROLLconst, ssa.Op386ROLWconst, ssa.Op386ROLBconst: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.Op386SBBLcarrymask: r := v.Reg() p := s.Prog(v.Op.Asm()) @@ -447,9 +427,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() - ssagen.AddAux2(&p.From, v, sc.Off()) + ssagen.AddAux2(&p.From, v, sc.Off64()) p.To.Type = obj.TYPE_CONST - p.To.Offset = sc.Val() + p.To.Offset = sc.Val64() case ssa.Op386MOVLconst: x := v.Reg() @@ -536,9 +516,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } case ssa.Op386ADDLload, ssa.Op386SUBLload, ssa.Op386MULLload, ssa.Op386ANDLload, ssa.Op386ORLload, ssa.Op386XORLload, ssa.Op386ADDSDload, ssa.Op386ADDSSload, ssa.Op386SUBSDload, ssa.Op386SUBSSload, @@ -549,9 +526,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore, ssa.Op386ADDLmodify, ssa.Op386SUBLmodify, ssa.Op386ANDLmodify, ssa.Op386ORLmodify, ssa.Op386XORLmodify: p := s.Prog(v.Op.Asm()) @@ -570,7 +544,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } else { p = s.Prog(x86.ADECL) } - off := sc.Off() + off := sc.Off64() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() ssagen.AddAux2(&p.To, v, off) @@ -579,8 +553,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { fallthrough case ssa.Op386ANDLconstmodify, ssa.Op386ORLconstmodify, ssa.Op386XORLconstmodify: sc := v.AuxValAndOff() - off := sc.Off() - val := 
sc.Val() + off := sc.Off64() + val := sc.Val64() p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = val @@ -617,10 +591,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST sc := v.AuxValAndOff() - p.From.Offset = sc.Val() + p.From.Offset = sc.Val64() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - ssagen.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off64()) case ssa.Op386ADDLconstmodifyidx4: sc := v.AuxValAndOff() val := sc.Val() @@ -631,7 +605,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } else { p = s.Prog(x86.ADECL) } - off := sc.Off() + off := sc.Off64() p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() p.To.Scale = 4 @@ -645,7 +619,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST sc := v.AuxValAndOff() - p.From.Offset = sc.Val() + p.From.Offset = sc.Val64() r := v.Args[0].Reg() i := v.Args[1].Reg() switch v.Op { @@ -663,7 +637,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = r p.To.Index = i - ssagen.AddAux2(&p.To, v, sc.Off()) + ssagen.AddAux2(&p.To, v, sc.Off64()) case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX, ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD, ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL, @@ -781,16 +755,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.Op386NEGL, ssa.Op386BSWAPL, ssa.Op386NOTL: - r := v.Reg() - if r != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG - p.To.Reg = r + p.To.Reg = v.Reg() case ssa.Op386BSFL, ssa.Op386BSFW, ssa.Op386BSRL, ssa.Op386BSRW, - ssa.Op386SQRTSD: + ssa.Op386SQRTSS, ssa.Op386SQRTSD: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() @@ -862,6 +832,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = x86.REG_SP ssagen.AddAux(&p.To, v) + case ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. 
default: v.Fatalf("genValue not implemented: %s", v.LongString()) } diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go index cb2f4e8cf47..3af1e1fafdf 100644 --- a/src/cmd/compile/main.go +++ b/src/cmd/compile/main.go @@ -18,8 +18,8 @@ import ( "cmd/compile/internal/ssagen" "cmd/compile/internal/wasm" "cmd/compile/internal/x86" - "cmd/internal/objabi" "fmt" + "internal/buildcfg" "log" "os" ) @@ -45,9 +45,10 @@ func main() { log.SetFlags(0) log.SetPrefix("compile: ") - archInit, ok := archInits[objabi.GOARCH] + buildcfg.Check() + archInit, ok := archInits[buildcfg.GOARCH] if !ok { - fmt.Fprintf(os.Stderr, "compile: unknown architecture %q\n", objabi.GOARCH) + fmt.Fprintf(os.Stderr, "compile: unknown architecture %q\n", buildcfg.GOARCH) os.Exit(2) } diff --git a/src/cmd/cover/func.go b/src/cmd/cover/func.go index ce7c771ac96..76a16b3fc4a 100644 --- a/src/cmd/cover/func.go +++ b/src/cmd/cover/func.go @@ -23,6 +23,8 @@ import ( "runtime" "strings" "text/tabwriter" + + "golang.org/x/tools/cover" ) // funcOutput takes two file names as arguments, a coverage profile to read as input and an output @@ -38,7 +40,7 @@ import ( // total: (statements) 91.9% func funcOutput(profile, outputFile string) error { - profiles, err := ParseProfiles(profile) + profiles, err := cover.ParseProfiles(profile) if err != nil { return err } @@ -144,7 +146,7 @@ func (v *FuncVisitor) Visit(node ast.Node) ast.Visitor { } // coverage returns the fraction of the statements in the function that were covered, as a numerator and denominator. -func (f *FuncExtent) coverage(profile *Profile) (num, den int64) { +func (f *FuncExtent) coverage(profile *cover.Profile) (num, den int64) { // We could avoid making this n^2 overall by doing a single scan and annotating the functions, // but the sizes of the data structures is never very large and the scan is almost instantaneous. var covered, total int64 @@ -175,7 +177,7 @@ type Pkg struct { } } -func findPkgs(profiles []*Profile) (map[string]*Pkg, error) { +func findPkgs(profiles []*cover.Profile) (map[string]*Pkg, error) { // Run go list to find the location of every package we care about. pkgs := make(map[string]*Pkg) var list []string diff --git a/src/cmd/cover/html.go b/src/cmd/cover/html.go index b2865c427c8..3c1d17e7b95 100644 --- a/src/cmd/cover/html.go +++ b/src/cmd/cover/html.go @@ -15,13 +15,15 @@ import ( "os" "path/filepath" "strings" + + "golang.org/x/tools/cover" ) // htmlOutput reads the profile data from profile and generates an HTML // coverage report, writing it to outfile. If outfile is empty, // it writes the report to a temporary file and opens it in a web browser. func htmlOutput(profile, outfile string) error { - profiles, err := ParseProfiles(profile) + profiles, err := cover.ParseProfiles(profile) if err != nil { return err } @@ -92,7 +94,7 @@ func htmlOutput(profile, outfile string) error { // percentCovered returns, as a percentage, the fraction of the statements in // the profile covered by the test run. // In effect, it reports the coverage of a given source file. -func percentCovered(p *Profile) float64 { +func percentCovered(p *cover.Profile) float64 { var total, covered int64 for _, b := range p.Blocks { total += int64(b.NumStmt) @@ -108,7 +110,7 @@ func percentCovered(p *Profile) float64 { // htmlGen generates an HTML coverage report with the provided filename, // source code, and tokens, and writes it to the given Writer. 
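With ParseProfiles now coming from golang.org/x/tools/cover instead of a local copy, the parsing and percentage logic used by funcOutput and percentCovered above can be reproduced with that public package. A minimal sketch; the profile path `c.out` is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/cover"
)

func main() {
	// Parse a coverage profile as produced by `go test -coverprofile=c.out`.
	profiles, err := cover.ParseProfiles("c.out")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range profiles {
		var covered, total int64
		for _, b := range p.Blocks {
			total += int64(b.NumStmt)
			if b.Count > 0 {
				covered += int64(b.NumStmt)
			}
		}
		if total > 0 {
			fmt.Printf("%s: %.1f%%\n", p.FileName, 100*float64(covered)/float64(total))
		}
	}
}
```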
-func htmlGen(w io.Writer, src []byte, boundaries []Boundary) error {
+func htmlGen(w io.Writer, src []byte, boundaries []cover.Boundary) error {
 	dst := bufio.NewWriter(w)
 	for i := range src {
 		for len(boundaries) > 0 && boundaries[0].Offset == i {
diff --git a/src/cmd/cover/testdata/toolexec.go b/src/cmd/cover/testdata/toolexec.go
index 386de79038a..458adaeaaa5 100644
--- a/src/cmd/cover/testdata/toolexec.go
+++ b/src/cmd/cover/testdata/toolexec.go
@@ -15,8 +15,8 @@
 package main
 
 import (
-	"os"
 	exec "internal/execabs"
+	"os"
 	"strings"
 )
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index c02b92818c2..00e23ef179e 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -14,6 +14,7 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
+	"regexp"
 	"sort"
 	"strings"
 	"sync"
@@ -39,6 +40,7 @@ var (
 	goextlinkenabled string
 	gogcflags string // For running built compiler
 	goldflags string
+	goexperiment string
 	workdir string
 	tooldir string
 	oldgoos string
@@ -111,9 +113,6 @@ func xinit() {
 		fatalf("$GOROOT must be set")
 	}
 	goroot = filepath.Clean(b)
-	if modRoot := findModuleRoot(goroot); modRoot != "" {
-		fatalf("found go.mod file in %s: $GOROOT must not be inside a module", modRoot)
-	}
 
 	b = os.Getenv("GOROOT_FINAL")
 	if b == "" {
@@ -197,6 +196,9 @@ func xinit() {
 		goextlinkenabled = b
 	}
 
+	goexperiment = os.Getenv("GOEXPERIMENT")
+	// TODO(mdempsky): Validate known experiments?
+
 	gogcflags = os.Getenv("BOOT_GO_GCFLAGS")
 	goldflags = os.Getenv("BOOT_GO_LDFLAGS")
 
@@ -241,6 +243,9 @@ func xinit() {
 	os.Setenv("LANGUAGE", "en_US.UTF8")
 
 	workdir = xworkdir()
+	if err := ioutil.WriteFile(pathf("%s/go.mod", workdir), []byte("module bootstrap"), 0666); err != nil {
+		fatalf("cannot write stub go.mod: %s", err)
+	}
 	xatexit(rmworkdir)
 
 	tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch)
@@ -401,8 +406,22 @@ func findgoversion() string {
 	}
 
 	if !precise {
-		// Tag does not point at HEAD; add hash and date to version.
-		tag += chomp(run(goroot, CheckExit, "git", "log", "-n", "1", "--format=format: +%h %cd", "HEAD"))
+		// Tag does not point at HEAD; add 1.x base version, hash, and date to version.
+		//
+		// Note that we lightly parse internal/goversion/goversion.go to
+		// obtain the base version. We can't just import the package,
+		// because cmd/dist is built with a bootstrap GOROOT which could
+		// be an entirely different version of Go, like 1.4. We assume
+		// that the file contains "const Version = <Integer>".
+
+		goversionSource := readfile(pathf("%s/src/internal/goversion/goversion.go", goroot))
+		m := regexp.MustCompile(`(?m)^const Version = (\d+)`).FindStringSubmatch(goversionSource)
+		if m == nil {
+			fatalf("internal/goversion/goversion.go does not contain 'const Version = ...'")
+		}
+		tag += fmt.Sprintf(" go1.%s-", m[1])
+
+		tag += chomp(run(goroot, CheckExit, "git", "log", "-n", "1", "--format=format:%h %cd", "HEAD"))
 	}
 
 	// Cache version.
@@ -834,18 +853,6 @@ func runInstall(pkg string, ch chan struct{}) {
 	goasmh := pathf("%s/go_asm.h", workdir)
 	if IsRuntimePackagePath(pkg) {
 		asmArgs = append(asmArgs, "-compiling-runtime")
-		if os.Getenv("GOEXPERIMENT") == "regabi" {
-			// In order to make it easier to port runtime assembly
-			// to the register ABI, we introduce a macro
-			// indicating the experiment is enabled.
-			//
-			// Note: a similar change also appears in
-			// cmd/go/internal/work/gc.go.
-			//
-			// TODO(austin): Remove this once we commit to the
-			// register ABI (#40724).
-			asmArgs = append(asmArgs, "-D=GOEXPERIMENT_REGABI=1")
-		}
 	}
 
 	// Collect symabis from assembly code.
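The version-detection change above derives the go1.x base by lightly parsing internal/goversion/goversion.go. The same regexp can be exercised in isolation; in this sketch the file contents are a stand-in rather than read from a real GOROOT:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Stand-in for the contents of internal/goversion/goversion.go.
	src := "package goversion\n\nconst Version = 17\n"

	m := regexp.MustCompile(`(?m)^const Version = (\d+)`).FindStringSubmatch(src)
	if m == nil {
		panic("no Version constant found")
	}
	// Yields a base like "go1.17-", to which findgoversion then
	// appends the commit hash and date.
	fmt.Printf("go1.%s-\n", m[1])
}
```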
@@ -1271,6 +1278,20 @@ func cmdbootstrap() { // go tool may complain. os.Setenv("GOPATH", pathf("%s/pkg/obj/gopath", goroot)) + // Disable GOEXPERIMENT when building toolchain1 and + // go_bootstrap. We don't need any experiments for the + // bootstrap toolchain, and this lets us avoid duplicating the + // GOEXPERIMENT-related build logic from cmd/go here. If the + // bootstrap toolchain is < Go 1.17, it will ignore this + // anyway since GOEXPERIMENT is baked in; otherwise it will + // pick it up from the environment we set here. Once we're + // using toolchain1 with dist as the build system, we need to + // override this to keep the experiments assumed by the + // toolchain and by dist consistent. Once go_bootstrap takes + // over the build process, we'll set this back to the original + // GOEXPERIMENT. + os.Setenv("GOEXPERIMENT", "none") + if debug { // cmd/buildid is used in debug mode. toolchain = append(toolchain, "cmd/buildid") @@ -1348,6 +1369,8 @@ func cmdbootstrap() { } xprintf("Building Go toolchain2 using go_bootstrap and Go toolchain1.\n") os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) + // Now that cmd/go is in charge of the build process, enable GOEXPERIMENT. + os.Setenv("GOEXPERIMENT", goexperiment) goInstall(goBootstrap, append([]string{"-i"}, toolchain...)...) if debug { run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") @@ -1500,11 +1523,11 @@ func goCmd(goBinary string, cmd string, args ...string) { goCmd = append(goCmd, "-p=1") } - run(goroot, ShowOutput|CheckExit, append(goCmd, args...)...) + run(workdir, ShowOutput|CheckExit, append(goCmd, args...)...) } func checkNotStale(goBinary string, targets ...string) { - out := run(goroot, CheckExit, + out := run(workdir, CheckExit, append([]string{ goBinary, "list", "-gcflags=all=" + gogcflags, "-ldflags=all=" + goldflags, @@ -1514,11 +1537,11 @@ func checkNotStale(goBinary string, targets ...string) { os.Setenv("GODEBUG", "gocachehash=1") for _, target := range []string{"runtime/internal/sys", "cmd/dist", "cmd/link"} { if strings.Contains(out, "STALE "+target) { - run(goroot, ShowOutput|CheckExit, goBinary, "list", "-f={{.ImportPath}} {{.Stale}}", target) + run(workdir, ShowOutput|CheckExit, goBinary, "list", "-f={{.ImportPath}} {{.Stale}}", target) break } } - fatalf("unexpected stale targets reported by %s list -gcflags=\"%s\" -ldflags=\"%s\" for %v:\n%s", goBinary, gogcflags, goldflags, targets, out) + fatalf("unexpected stale targets reported by %s list -gcflags=\"%s\" -ldflags=\"%s\" for %v (consider rerunning with GOMAXPROCS=1 GODEBUG=gocachehash=1):\n%s", goBinary, gogcflags, goldflags, targets, out) } } @@ -1567,7 +1590,7 @@ var cgoEnabled = map[string]bool{ "openbsd/amd64": true, "openbsd/arm": true, "openbsd/arm64": true, - "openbsd/mips64": false, + "openbsd/mips64": true, "plan9/386": false, "plan9/amd64": false, "plan9/arm": false, @@ -1575,7 +1598,7 @@ var cgoEnabled = map[string]bool{ "windows/386": true, "windows/amd64": true, "windows/arm": false, - "windows/arm64": false, + "windows/arm64": true, } // List of platforms which are supported but not complete yet. 
These get
@@ -1610,20 +1633,6 @@ func checkCC() {
 	}
 }
 
-func findModuleRoot(dir string) (root string) {
-	for {
-		if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
-			return dir
-		}
-		d := filepath.Dir(dir)
-		if d == dir {
-			break
-		}
-		dir = d
-	}
-	return ""
-}
-
 func defaulttarg() string {
 	// xgetwd might return a path with symlinks fully resolved, and if
 	// there happens to be symlinks in goroot, then the hasprefix test
@@ -1755,8 +1764,9 @@ func cmdlist() {
 // IsRuntimePackagePath examines 'pkgpath' and returns TRUE if it
 // belongs to the collection of "runtime-related" packages, including
 // "runtime" itself, "reflect", "syscall", and the
-// "runtime/internal/*" packages. See also the function of the same
-// name in cmd/internal/objabi/path.go.
+// "runtime/internal/*" packages.
+//
+// Keep in sync with cmd/internal/objabi/path.go:IsRuntimePackagePath.
 func IsRuntimePackagePath(pkgpath string) bool {
 	rval := false
 	switch pkgpath {
@@ -1766,7 +1776,7 @@ func IsRuntimePackagePath(pkgpath string) bool {
 		rval = true
 	case "syscall":
 		rval = true
-	case "crypto/x509/internal/macos": // libc function wrappers need to be ABIInternal
+	case "internal/bytealg":
 		rval = true
 	default:
 		rval = strings.HasPrefix(pkgpath, "runtime/internal")
diff --git a/src/cmd/dist/buildruntime.go b/src/cmd/dist/buildruntime.go
index 27449515976..54e935ad3be 100644
--- a/src/cmd/dist/buildruntime.go
+++ b/src/cmd/dist/buildruntime.go
@@ -19,8 +19,6 @@ import (
 //
 //	package sys
 //
-//	const TheVersion = <version>
-//	const Goexperiment = <goexperiment>
 //	const StackGuardMultiplier = <multiplier>
 //
 func mkzversion(dir, file string) {
@@ -29,29 +27,20 @@ func mkzversion(dir, file string) {
 	fmt.Fprintln(&buf)
 	fmt.Fprintf(&buf, "package sys\n")
 	fmt.Fprintln(&buf)
-	fmt.Fprintf(&buf, "const TheVersion = `%s`\n", findgoversion())
-	fmt.Fprintf(&buf, "const Goexperiment = `%s`\n", os.Getenv("GOEXPERIMENT"))
 	fmt.Fprintf(&buf, "const StackGuardMultiplierDefault = %d\n", stackGuardMultiplierDefault())
 
 	writefile(buf.String(), file, writeSkipSame)
 }
 
-// mkzbootstrap writes cmd/internal/objabi/zbootstrap.go:
+// mkbuildcfg writes internal/buildcfg/zbootstrap.go:
 //
-//	package objabi
+//	package buildcfg
 //
 //	const defaultGOROOT = <goroot>
 //	const defaultGO386 = <go386>
-//	const defaultGOARM = <goarm>
-//	const defaultGOMIPS = <gomips>
-//	const defaultGOMIPS64 = <gomips64>
-//	const defaultGOPPC64 = <goppc64>
+//	...
 //	const defaultGOOS = runtime.GOOS
 //	const defaultGOARCH = runtime.GOARCH
-//	const defaultGO_EXTLINK_ENABLED = <goextlinkenabled>
-//	const version = <version>
-//	const stackGuardMultiplierDefault = <multiplier>
-//	const goexperiment = <goexperiment>
 //
 // The use of runtime.GOOS and runtime.GOARCH makes sure that
 // a cross-compiled compiler expects to compile for its own target
 // architecture. That is, if on a Mac you do:
 //
 //	GOOS=linux GOARCH=ppc64 go build cmd/compile
 //
 // the resulting compiler will default to generating linux/ppc64 object files.
 // This is more useful than having it default to generating objects for the
 // original target (in this example, a Mac).
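For concreteness, a generated internal/buildcfg/zbootstrap.go might look like the following under the new scheme. Every value here is illustrative only; each build writes its own:

```go
// Code generated by go tool dist; DO NOT EDIT.

package buildcfg

import "runtime"

const defaultGOROOT = `/usr/local/go`
const defaultGO386 = `sse2`
const defaultGOARM = `7`
const defaultGOMIPS = `hardfloat`
const defaultGOMIPS64 = `hardfloat`
const defaultGOPPC64 = `power8`
const defaultGOEXPERIMENT = ``
const defaultGO_EXTLINK_ENABLED = ``
const defaultGO_LDSO = ``
const version = `devel go1.17-abc123 Thu May 6 12:00:00 2021 +0000`
const defaultGOOS = runtime.GOOS
const defaultGOARCH = runtime.GOARCH
```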
-func mkzbootstrap(file string) {
+func mkbuildcfg(file string) {
 	var buf bytes.Buffer
 	fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n")
 	fmt.Fprintln(&buf)
-	fmt.Fprintf(&buf, "package objabi\n")
+	fmt.Fprintf(&buf, "package buildcfg\n")
 	fmt.Fprintln(&buf)
 	fmt.Fprintf(&buf, "import \"runtime\"\n")
 	fmt.Fprintln(&buf)
@@ -75,13 +64,29 @@ func mkzversion(dir, file string) {
 	fmt.Fprintf(&buf, "const defaultGOMIPS = `%s`\n", gomips)
 	fmt.Fprintf(&buf, "const defaultGOMIPS64 = `%s`\n", gomips64)
 	fmt.Fprintf(&buf, "const defaultGOPPC64 = `%s`\n", goppc64)
-	fmt.Fprintf(&buf, "const defaultGOOS = runtime.GOOS\n")
-	fmt.Fprintf(&buf, "const defaultGOARCH = runtime.GOARCH\n")
+	fmt.Fprintf(&buf, "const defaultGOEXPERIMENT = `%s`\n", goexperiment)
 	fmt.Fprintf(&buf, "const defaultGO_EXTLINK_ENABLED = `%s`\n", goextlinkenabled)
 	fmt.Fprintf(&buf, "const defaultGO_LDSO = `%s`\n", defaultldso)
 	fmt.Fprintf(&buf, "const version = `%s`\n", findgoversion())
+	fmt.Fprintf(&buf, "const defaultGOOS = runtime.GOOS\n")
+	fmt.Fprintf(&buf, "const defaultGOARCH = runtime.GOARCH\n")
+
+	writefile(buf.String(), file, writeSkipSame)
+}
+
+// mkobjabi writes cmd/internal/objabi/zbootstrap.go:
+//
+//	package objabi
+//
+//	const stackGuardMultiplierDefault = <multiplier>
+//
+func mkobjabi(file string) {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n")
+	fmt.Fprintln(&buf)
+	fmt.Fprintf(&buf, "package objabi\n")
+	fmt.Fprintln(&buf)
 	fmt.Fprintf(&buf, "const stackGuardMultiplierDefault = %d\n", stackGuardMultiplierDefault())
-	fmt.Fprintf(&buf, "const goexperiment = `%s`\n", os.Getenv("GOEXPERIMENT"))
 
 	writefile(buf.String(), file, writeSkipSame)
 }
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 7520b0ef186..26b33e389fe 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -58,6 +58,8 @@ var bootstrapDirs = []string{
 	"debug/macho",
 	"debug/pe",
 	"go/constant",
+	"internal/buildcfg",
+	"internal/goexperiment",
 	"internal/goversion",
 	"internal/race",
 	"internal/unsafeheader",
@@ -97,7 +99,8 @@ func bootstrapBuildTools() {
 	}
 	xprintf("Building Go toolchain1 using %s.\n", goroot_bootstrap)
 
-	mkzbootstrap(pathf("%s/src/cmd/internal/objabi/zbootstrap.go", goroot))
+	mkbuildcfg(pathf("%s/src/internal/buildcfg/zbootstrap.go", goroot))
+	mkobjabi(pathf("%s/src/cmd/internal/objabi/zbootstrap.go", goroot))
 
 	// Use $GOROOT/pkg/bootstrap as the bootstrap workspace root.
 	// We use a subdirectory of $GOROOT/pkg because that's the
diff --git a/src/cmd/dist/sys_default.go b/src/cmd/dist/sys_default.go
index 821dc273d60..e87c84ce3ee 100644
--- a/src/cmd/dist/sys_default.go
+++ b/src/cmd/dist/sys_default.go
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !windows
 // +build !windows
 
 package main
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index a22397aa16b..50bf80ba596 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -42,6 +42,7 @@ func cmdtest() {
 	if noRebuild {
 		t.rebuild = false
 	}
+
 	t.run()
 }
 
@@ -117,6 +118,21 @@ func (t *tester) run() {
 		}
 	}
 
+	// Set GOTRACEBACK to system if the user didn't set a level explicitly.
+	// Since we're running tests for Go, we want as much detail as possible
+	// if something goes wrong.
+	//
+	// Set it before running any commands just in case something goes wrong.
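The GOTRACEBACK logic that follows checks isEnvSet (added later in this patch) rather than os.Getenv, presumably because an explicitly empty value should still count as a user choice. A self-contained sketch of that distinction:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// isEnvSet reports whether evar is present in the environment, even
// when it is set to the empty string; os.Getenv alone cannot tell
// "unset" apart from "set to empty".
func isEnvSet(evar string) bool {
	evarEq := evar + "="
	for _, e := range os.Environ() {
		if strings.HasPrefix(e, evarEq) {
			return true
		}
	}
	return false
}

func main() {
	if !isEnvSet("GOTRACEBACK") {
		// Only override when the user expressed no preference.
		os.Setenv("GOTRACEBACK", "system")
	}
	fmt.Println("GOTRACEBACK =", os.Getenv("GOTRACEBACK"))
}
```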
+	if ok := isEnvSet("GOTRACEBACK"); !ok {
+		if err := os.Setenv("GOTRACEBACK", "system"); err != nil {
+			if t.keepGoing {
+				log.Printf("Failed to set GOTRACEBACK: %v", err)
+			} else {
+				fatalf("Failed to set GOTRACEBACK: %v", err)
+			}
+		}
+	}
+
 	if t.rebuild {
 		t.out("Building packages and commands.")
 		// Force rebuild the whole toolchain.
@@ -475,6 +491,19 @@ func (t *tester) registerTests() {
 		})
 	}
 
+	// Test go/... and cmd/gofmt with type parameters enabled.
+	if !t.compileOnly {
+		t.tests = append(t.tests, distTest{
+			name: "typeparams",
+			heading: "go/... and cmd/gofmt tests with tag typeparams",
+			fn: func(dt *distTest) error {
+				t.addCmd(dt, "src", t.goTest(), t.timeout(300), "-tags=typeparams", "go/...")
+				t.addCmd(dt, "src", t.goTest(), t.timeout(300), "-tags=typeparams", "cmd/gofmt")
+				return nil
+			},
+		})
+	}
+
 	if t.iOS() && !t.compileOnly {
 		t.tests = append(t.tests, distTest{
 			name: "x509omitbundledroots",
@@ -736,8 +765,10 @@ func (t *tester) registerTests() {
 	if gohostos == "linux" && goarch == "amd64" {
 		t.registerTest("testasan", "../misc/cgo/testasan", "go", "run", ".")
 	}
-	if mSanSupported(goos, goarch) {
-		t.registerHostTest("testsanitizers/msan", "../misc/cgo/testsanitizers", "misc/cgo/testsanitizers", ".")
+	if goos == "linux" && goarch != "ppc64le" {
+		// because the syscall.SysProcAttr struct used in misc/cgo/testsanitizers is only built on linux.
+		// Some inconsistent failures happen on ppc64le so disable for now.
+		t.registerHostTest("testsanitizers", "../misc/cgo/testsanitizers", "misc/cgo/testsanitizers", ".")
 	}
 	if t.hasBash() && goos != "android" && !t.iOS() && gohostos != "windows" {
 		t.registerHostTest("cgo_errors", "../misc/cgo/errors", "misc/cgo/errors", ".")
@@ -962,6 +993,9 @@ func (t *tester) internalLink() bool {
 	if goos == "ios" {
 		return false
 	}
+	if goos == "windows" && goarch == "arm64" {
+		return false
+	}
 	// Internally linking cgo is incomplete on some architectures.
 	// https://golang.org/issue/10373
 	// https://golang.org/issue/14449
@@ -1094,8 +1128,7 @@ func (t *tester) cgoTest(dt *distTest) error {
 	cmd := t.addCmd(dt, "misc/cgo/test", t.goTest())
 	cmd.Env = append(os.Environ(), "GOFLAGS=-ldflags=-linkmode=auto")
 
-	// Skip internal linking cases on arm64 to support GCC-9.4 and above,
-	// only for linux, conservatively.
+	// Skip internal linking cases on linux/arm64 to support GCC-9.4 and above.
 	// See issue #39466.
 	skipInternalLink := goarch == "arm64" && goos == "linux"
 
@@ -1633,24 +1666,13 @@ func raceDetectorSupported(goos, goarch string) bool {
 		return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64"
 	case "darwin":
 		return goarch == "amd64" || goarch == "arm64"
-	case "freebsd", "netbsd", "windows":
+	case "freebsd", "netbsd", "openbsd", "windows":
 		return goarch == "amd64"
 	default:
 		return false
 	}
 }
 
-// mSanSupported is a copy of the function cmd/internal/sys.MSanSupported,
-// which can't be used here because cmd/dist has to be buildable by Go 1.4.
-func mSanSupported(goos, goarch string) bool { - switch goos { - case "linux": - return goarch == "amd64" || goarch == "arm64" - default: - return false - } -} - // isUnsupportedVMASize reports whether the failure is caused by an unsupported // VMA for the race detector (for example, running the race detector on an // arm64 machine configured with 39-bit VMA) @@ -1658,3 +1680,15 @@ func isUnsupportedVMASize(w *work) bool { unsupportedVMA := []byte("unsupported VMA range") return w.dt.name == "race" && bytes.Contains(w.out, unsupportedVMA) } + +// isEnvSet reports whether the environment variable evar is +// set in the environment. +func isEnvSet(evar string) bool { + evarEq := evar + "=" + for _, e := range os.Environ() { + if strings.HasPrefix(e, evarEq) { + return true + } + } + return false +} diff --git a/src/cmd/dist/test_linux.go b/src/cmd/dist/test_linux.go index b6d0aedbbf7..43d28dc6619 100644 --- a/src/cmd/dist/test_linux.go +++ b/src/cmd/dist/test_linux.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package main diff --git a/src/cmd/dist/util.go b/src/cmd/dist/util.go index e99375f5380..df60145d1e2 100644 --- a/src/cmd/dist/util.go +++ b/src/cmd/dist/util.go @@ -249,6 +249,7 @@ func writefile(text, file string, flag int) { if flag&writeExec != 0 { mode = 0777 } + xremove(file) // in case of symlink tricks by misc/reboot test err := ioutil.WriteFile(file, new, mode) if err != nil { fatalf("%v", err) diff --git a/src/cmd/dist/util_gc.go b/src/cmd/dist/util_gc.go index 17a0e6fbb56..875784d3830 100644 --- a/src/cmd/dist/util_gc.go +++ b/src/cmd/dist/util_gc.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +//go:build gc +// +build gc package main diff --git a/src/cmd/dist/util_gccgo.go b/src/cmd/dist/util_gccgo.go index dc897236fbb..3255b803652 100644 --- a/src/cmd/dist/util_gccgo.go +++ b/src/cmd/dist/util_gccgo.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gccgo // +build gccgo package main diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go index 39530e3c2d6..af7793133ef 100644 --- a/src/cmd/doc/doc_test.go +++ b/src/cmd/doc/doc_test.go @@ -579,7 +579,7 @@ var tests = []test{ []string{ `Comment about exported interface`, // Include comment. `type ExportedInterface interface`, // Interface definition. - `Comment before exported method.*\n.*ExportedMethod\(\)` + + `Comment before exported method.\n.*//\n.*// // Code block showing how to use ExportedMethod\n.*// func DoSomething\(\) error {\n.*// ExportedMethod\(\)\n.*// return nil\n.*// }\n.*//.*\n.*ExportedMethod\(\)` + `.*Comment on line with exported method`, `io.Reader.*Comment on line with embedded Reader`, `error.*Comment on line with embedded error`, @@ -599,8 +599,7 @@ var tests = []test{ []string{ `Comment about exported interface`, // Include comment. `type ExportedInterface interface`, // Interface definition. 
- `Comment before exported method.*\n.*ExportedMethod\(\)` + - `.*Comment on line with exported method`, + `Comment before exported method.\n.*//\n.*// // Code block showing how to use ExportedMethod\n.*// func DoSomething\(\) error {\n.*// ExportedMethod\(\)\n.*// return nil\n.*// }\n.*//.*\n.*ExportedMethod\(\)` + `.*Comment on line with exported method`, `unexportedMethod\(\).*Comment on line with unexported method`, `io.Reader.*Comment on line with embedded Reader`, `error.*Comment on line with embedded error`, @@ -615,7 +614,7 @@ var tests = []test{ "interface method", []string{p, `ExportedInterface.ExportedMethod`}, []string{ - `Comment before exported method.*\n.*ExportedMethod\(\)` + + `Comment before exported method.\n.*//\n.*// // Code block showing how to use ExportedMethod\n.*// func DoSomething\(\) error {\n.*// ExportedMethod\(\)\n.*// return nil\n.*// }\n.*//.*\n.*ExportedMethod\(\)` + `.*Comment on line with exported method`, }, []string{ diff --git a/src/cmd/doc/pkg.go b/src/cmd/doc/pkg.go index c2e06ebc8b0..587f0bdc146 100644 --- a/src/cmd/doc/pkg.go +++ b/src/cmd/doc/pkg.go @@ -950,6 +950,9 @@ func (pkg *Package) printMethodDoc(symbol, method string) bool { // Not an interface type. continue } + + // Collect and print only the methods that match. + var methods []*ast.Field for _, iMethod := range inter.Methods.List { // This is an interface, so there can be only one name. // TODO: Anonymous methods (embedding) @@ -958,22 +961,21 @@ func (pkg *Package) printMethodDoc(symbol, method string) bool { } name := iMethod.Names[0].Name if match(method, name) { - if iMethod.Doc != nil { - for _, comment := range iMethod.Doc.List { - doc.ToText(&pkg.buf, comment.Text, "", indent, indentedWidth) - } - } - s := pkg.oneLineNode(iMethod.Type) - // Hack: s starts "func" but there is no name present. - // We could instead build a FuncDecl but it's not worthwhile. - lineComment := "" - if iMethod.Comment != nil { - lineComment = fmt.Sprintf(" %s", iMethod.Comment.List[0].Text) - } - pkg.Printf("func %s%s%s\n", name, s[4:], lineComment) + methods = append(methods, iMethod) found = true } } + if found { + pkg.Printf("type %s ", spec.Name) + inter.Methods.List, methods = methods, inter.Methods.List + err := format.Node(&pkg.buf, pkg.fs, inter) + if err != nil { + log.Fatal(err) + } + pkg.newlines(1) + // Restore the original methods. + inter.Methods.List = methods + } } return found } diff --git a/src/cmd/doc/testdata/pkg.go b/src/cmd/doc/testdata/pkg.go index d695bdf1c5f..5ece8325651 100644 --- a/src/cmd/doc/testdata/pkg.go +++ b/src/cmd/doc/testdata/pkg.go @@ -111,6 +111,13 @@ const unexportedTypedConstant ExportedType = 1 // In a separate section to test // Comment about exported interface. type ExportedInterface interface { // Comment before exported method. + // + // // Code block showing how to use ExportedMethod + // func DoSomething() error { + // ExportedMethod() + // return nil + // } + // ExportedMethod() // Comment on line with exported method. unexportedMethod() // Comment on line with unexported method. io.Reader // Comment on line with embedded Reader. 
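The printMethodDoc rewrite above prints only the matching interface methods by temporarily swapping inter.Methods.List and handing the pruned node to go/format. A standalone sketch of the same technique; the sample source is invented:

```go
package main

import (
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"log"
	"os"
)

const src = `package p

type ExportedInterface interface {
	// Comment before exported method.
	ExportedMethod() // Comment on line with exported method.
	unexportedMethod()
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "pkg.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}

	spec := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.TypeSpec)
	inter := spec.Type.(*ast.InterfaceType)

	// Keep only the matching method, then print the pruned
	// interface with format.Node.
	var keep []*ast.Field
	for _, m := range inter.Methods.List {
		if len(m.Names) == 1 && m.Names[0].Name == "ExportedMethod" {
			keep = append(keep, m)
		}
	}
	inter.Methods.List = keep

	os.Stdout.WriteString("type " + spec.Name.Name + " ")
	if err := format.Node(os.Stdout, fset, inter); err != nil {
		log.Fatal(err)
	}
	os.Stdout.WriteString("\n")
}
```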
diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 5414e5e6888..7a96bc64095 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -1,12 +1,15 @@ module cmd -go 1.16 +go 1.17 require ( - github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2 - golang.org/x/arch v0.0.0-20201008161808-52c3e6f60cff - golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 - golang.org/x/mod v0.4.1 - golang.org/x/sys v0.0.0-20210218145245-beda7e5e158e // indirect - golang.org/x/tools v0.0.0-20210107193943-4ed967dd8eff + github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a + github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 // indirect + golang.org/x/arch v0.0.0-20210502124803-cbf565b21d1e + golang.org/x/crypto v0.0.0-20210503195802-e9a32991a82e // indirect + golang.org/x/mod v0.4.3-0.20210504181020-67f1c1edc27a + golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 // indirect + golang.org/x/term v0.0.0-20210503060354-a79de5458b56 + golang.org/x/tools v0.1.1-0.20210505014545-7cab0ef2e9a5 + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect ) diff --git a/src/cmd/go.sum b/src/cmd/go.sum index 3dc0565f655..1c6e2248208 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -1,40 +1,22 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2 h1:HyOHhUtuB/Ruw/L5s5pG2D0kckkN2/IzBs9OClGHnHI= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a h1:jmAp/2PZAScNd62lTD3Mcb0Ey9FvIIJtLohPhtxZJ+Q= +github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -golang.org/x/arch v0.0.0-20201008161808-52c3e6f60cff h1:XmKBi9R6duxOB3lfc72wyrwiOY7X2Jl1wuI+RFOyMDE= -golang.org/x/arch v0.0.0-20201008161808-52c3e6f60cff/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/arch v0.0.0-20210502124803-cbf565b21d1e h1:pv3V0NlNSh5Q6AX/StwGLBjcLS7UN4m4Gq+V+uSecqM= +golang.org/x/arch v0.0.0-20210502124803-cbf565b21d1e/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.0.0-20210503195802-e9a32991a82e h1:8foAy0aoO5GkqCvAEJ4VC4P3zksTg4X4aJCDpZzmgQI= +golang.org/x/crypto v0.0.0-20210503195802-e9a32991a82e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/mod v0.4.3-0.20210504181020-67f1c1edc27a h1:wbpC/7Wbo5WFVox32n+KjhRRLmTLq8YW/wRlL2iVAhk= +golang.org/x/mod v0.4.3-0.20210504181020-67f1c1edc27a/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210218145245-beda7e5e158e h1:f5mksnk+hgXHnImpZoWj64ja99j9zV7YUgrVG95uFE4= -golang.org/x/sys v0.0.0-20210218145245-beda7e5e158e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20210107193943-4ed967dd8eff h1:6EkB024TP1fu6cmQqeCNw685zYDVt5g8N1BXh755SQM= -golang.org/x/tools v0.0.0-20210107193943-4ed967dd8eff/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c= +golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56 h1:b8jxX3zqjpqb2LklXPzKSGJhzyxCOZSz8ncv8Nv+y7w= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= +golang.org/x/tools v0.1.1-0.20210505014545-7cab0ef2e9a5 h1:ImcI7RFHWLu2QWpFDXaReu0j+sQAHIy65vUFZImXiqY= +golang.org/x/tools v0.1.1-0.20210505014545-7cab0ef2e9a5/go.mod h1:sH/Eidr0EddymY8HZSakBo32zU3fG5ovDq874hJLjVg= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index db3f281ef35..052b61c03dd 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -198,6 +198,8 @@ // a program to use to invoke toolchain programs 
like vet and asm.
 // For example, instead of running asm, the go command will run
 // 'cmd args /path/to/asm <arguments for asm>'.
+// The TOOLEXEC_IMPORTPATH environment variable will be set,
+// matching 'go list -f {{.ImportPath}}' for the package being built.
 //
 // The -asmflags, -gccgoflags, -gcflags, and -ldflags flags accept a
 // space-separated list of arguments to pass to an underlying tool
@@ -596,7 +598,7 @@
 //
 // Usage:
 //
-//	go get [-d] [-t] [-u] [-v] [-insecure] [build flags] [packages]
+//	go get [-d] [-t] [-u] [-v] [build flags] [packages]
 //
 // Get resolves its command-line arguments to packages at specific module versions,
 // updates go.mod to require those versions, downloads source code into the
@@ -641,14 +643,6 @@
 // When the -t and -u flags are used together, get will update
 // test dependencies as well.
 //
-// The -insecure flag permits fetching from repositories and resolving
-// custom domains using insecure schemes such as HTTP, and also bypassess
-// module sum validation using the checksum database. Use with caution.
-// This flag is deprecated and will be removed in a future version of go.
-// To permit the use of insecure schemes, use the GOINSECURE environment
-// variable instead. To bypass module sum validation, use GOPRIVATE or
-// GONOSUMDB. See 'go help environment' for details.
-//
 // The -d flag instructs get not to build or install packages. get will only
 // update go.mod and download source code needed to build packages.
 //
@@ -692,18 +686,22 @@
 // arguments must satisfy the following constraints:
 //
 // - Arguments must be package paths or package patterns (with "..." wildcards).
-// They must not be standard packages (like fmt), meta-patterns (std, cmd,
-// all), or relative or absolute file paths.
+//   They must not be standard packages (like fmt), meta-patterns (std, cmd,
+//   all), or relative or absolute file paths.
+//
 // - All arguments must have the same version suffix. Different queries are not
-// allowed, even if they refer to the same version.
+//   allowed, even if they refer to the same version.
+//
 // - All arguments must refer to packages in the same module at the same version.
+//
 // - No module is considered the "main" module. If the module containing
-// packages named on the command line has a go.mod file, it must not contain
-// directives (replace and exclude) that would cause it to be interpreted
-// differently than if it were the main module. The module must not require
-// a higher version of itself.
+//   packages named on the command line has a go.mod file, it must not contain
+//   directives (replace and exclude) that would cause it to be interpreted
+//   differently than if it were the main module. The module must not require
+//   a higher version of itself.
+//
 // - Package path arguments must refer to main packages. Pattern arguments
-// will only match main packages.
+//   will only match main packages.
 //
 // If the arguments don't have version suffixes, "go install" may run in
 // module-aware mode or GOPATH mode, depending on the GO111MODULE environment
@@ -845,6 +843,7 @@
 //	UseAllFiles bool // use files regardless of +build lines, file names
 //	Compiler string // compiler to assume when computing target paths
 //	BuildTags []string // build constraints to match in +build lines
+//	ToolTags []string // toolchain-specific build constraints
 //	ReleaseTags []string // releases the current release is compatible with
 //	InstallSuffix string // suffix to use in the name of the install dir
 //	}
@@ -1138,12 +1137,12 @@
 // writing it back to go.mod.
The JSON output corresponds to these Go types: // // type Module struct { -// Path string +// Path string // Version string // } // // type GoMod struct { -// Module Module +// Module ModPath // Go string // Require []Require // Exclude []Module @@ -1151,6 +1150,11 @@ // Retract []Retract // } // +// type ModPath struct { +// Path string +// Deprecated string +// } +// // type Require struct { // Path string // Version string @@ -1217,7 +1221,7 @@ // // Usage: // -// go mod tidy [-e] [-v] +// go mod tidy [-e] [-v] [-go=version] // // Tidy makes sure go.mod matches the source code in the module. // It adds any missing modules necessary to build the current module's @@ -1231,6 +1235,12 @@ // The -e flag causes tidy to attempt to proceed despite errors // encountered while loading packages. // +// The -go flag causes tidy to update the 'go' directive in the go.mod +// file to the given version, which may change which module dependencies +// are retained as explicit requirements in the go.mod file. +// (Go versions 1.17 and higher retain more requirements in order to +// support lazy module loading.) +// // See https://golang.org/ref/mod#go-mod-tidy for more about 'go mod tidy'. // // @@ -1314,10 +1324,21 @@ // go run [build flags] [-exec xprog] package [arguments...] // // Run compiles and runs the named main Go package. -// Typically the package is specified as a list of .go source files from a single directory, -// but it may also be an import path, file system path, or pattern +// Typically the package is specified as a list of .go source files from a single +// directory, but it may also be an import path, file system path, or pattern // matching a single known package, as in 'go run .' or 'go run my/cmd'. // +// If the package argument has a version suffix (like @latest or @v1.0.0), +// "go run" builds the program in module-aware mode, ignoring the go.mod file in +// the current directory or any parent directory, if there is one. This is useful +// for running programs without affecting the dependencies of the main module. +// +// If the package argument doesn't have a version suffix, "go run" may run in +// module-aware mode or GOPATH mode, depending on the GO111MODULE environment +// variable and the presence of a go.mod file. See 'go help modules' for details. +// If module-aware mode is enabled, "go run" runs in the context of the main +// module. +// // By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. // If the -exec flag is given, 'go run' invokes the binary using xprog: // 'xprog a.out arguments...'. @@ -1412,8 +1433,8 @@ // // The rule for a match in the cache is that the run involves the same // test binary and the flags on the command line come entirely from a -// restricted set of 'cacheable' test flags, defined as -cpu, -list, -// -parallel, -run, -short, and -v. If a run of go test has any test +// restricted set of 'cacheable' test flags, defined as -benchtime, -cpu, +// -list, -parallel, -run, -short, and -v. If a run of go test has any test // or non-test flags outside this set, the result is not cached. To // disable test caching, use any test flag or argument other than the // cacheable flags. The idiomatic way to disable test caching explicitly @@ -1783,9 +1804,8 @@ // Comma-separated list of glob patterns (in the syntax of Go's path.Match) // of module path prefixes that should always be fetched in an insecure // manner. Only applies to dependencies that are being fetched directly. 
-// Unlike the -insecure flag on 'go get', GOINSECURE does not disable -// checksum database validation. GOPRIVATE or GONOSUMDB may be used -// to achieve that. +// GOINSECURE does not disable checksum database validation. GOPRIVATE or +// GONOSUMDB may be used to achieve that. // GOOS // The operating system for which to compile code. // Examples are linux, darwin, windows, netbsd. @@ -1957,7 +1977,7 @@ // The go.mod file format is described in detail at // https://golang.org/ref/mod#go-mod-file. // -// To create a new go.mod file, use 'go help init'. For details see +// To create a new go.mod file, use 'go mod init'. For details see // 'go help mod init' or https://golang.org/ref/mod#go-mod-init. // // To add missing module requirements or remove unneeded requirements, @@ -2135,7 +2155,7 @@ // This help text, accessible as 'go help gopath-get' even in module-aware mode, // describes 'go get' as it operates in legacy GOPATH mode. // -// Usage: go get [-d] [-f] [-t] [-u] [-v] [-fix] [-insecure] [build flags] [packages] +// Usage: go get [-d] [-f] [-t] [-u] [-v] [-fix] [build flags] [packages] // // Get downloads the packages named by the import paths, along with their // dependencies. It then installs the named packages, like 'go install'. @@ -2151,13 +2171,6 @@ // The -fix flag instructs get to run the fix tool on the downloaded packages // before resolving dependencies or building the code. // -// The -insecure flag permits fetching from repositories and resolving -// custom domains using insecure schemes such as HTTP. Use with caution. -// This flag is deprecated and will be removed in a future version of go. -// The GOINSECURE environment variable should be used instead, since it -// provides control over which packages may be retrieved using an insecure -// scheme. See 'go help environment' for details. -// // The -t flag instructs get to also download the packages required to build // the tests for the specified packages. // @@ -2342,7 +2355,7 @@ // will result in the following requests: // // https://example.org/pkg/foo?go-get=1 (preferred) -// http://example.org/pkg/foo?go-get=1 (fallback, only with -insecure) +// http://example.org/pkg/foo?go-get=1 (fallback, only with use of correctly set GOINSECURE) // // If that page contains the meta tag // @@ -2660,6 +2673,13 @@ // the Go tree can run a sanity check but not spend time running // exhaustive tests. // +// -shuffle off,on,N +// Randomize the execution order of tests and benchmarks. +// It is off by default. If -shuffle is set to on, then it will seed +// the randomizer using the system clock. If -shuffle is set to an +// integer N, then N will be used as the seed value. In both cases, +// the seed will be reported for reproducibility. +// // -timeout d // If a test binary runs longer than duration d, panic. // If d is 0, the timeout is disabled. diff --git a/src/cmd/go/go11.go b/src/cmd/go/go11.go index 7e383f4b5b0..a1f2727825e 100644 --- a/src/cmd/go/go11.go +++ b/src/cmd/go/go11.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
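The -shuffle documentation in the hunk above is the piece most easily shown by example: shuffling exposes hidden ordering dependencies between tests, and the reported seed makes a failing order reproducible. A contrived test file (names invented) that -shuffle would flush out:

```go
// Hypothetical usage, assuming a Go 1.17 toolchain:
//   go test -shuffle=on ./...   # random order; the seed is reported
//   go test -shuffle=12345 .    # replay the exact order for seed 12345
package shuffle_test

import "testing"

var state int

func TestSetup(t *testing.T) { state = 1 }

func TestUse(t *testing.T) {
	// Passes only if TestSetup happened to run first; shuffling the
	// execution order makes this latent dependency fail visibly.
	if state != 1 {
		t.Fatal("TestUse ran before TestSetup")
	}
}
```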
+//go:build go1.1 // +build go1.1 package main diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index d14b2328bfa..a059a6dd902 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -814,6 +814,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { "src/internal/abi", "src/internal/bytealg", "src/internal/cpu", + "src/internal/goexperiment", "src/math/bits", "src/unsafe", filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH), @@ -2831,3 +2832,27 @@ func TestCoverpkgTestOnly(t *testing.T) { tg.grepStderrNot("no packages being tested depend on matches", "bad match message") tg.grepStdout("coverage: 100", "no coverage") } + +// Regression test for golang.org/issue/34499: version command should not crash +// when executed in a deleted directory on Linux. +func TestExecInDeletedDir(t *testing.T) { + switch runtime.GOOS { + case "windows", "plan9", + "aix", // Fails with "device busy". + "solaris", "illumos": // Fails with "invalid argument". + t.Skipf("%v does not support removing the current working directory", runtime.GOOS) + } + tg := testgo(t) + defer tg.cleanup() + + wd, err := os.Getwd() + tg.check(err) + tg.makeTempdir() + tg.check(os.Chdir(tg.tempdir)) + defer func() { tg.check(os.Chdir(wd)) }() + + tg.check(os.Remove(tg.tempdir)) + + // `go version` should not fail + tg.run("version") +} diff --git a/src/cmd/go/go_unix_test.go b/src/cmd/go/go_unix_test.go index f6e10ca59c7..7d5ff9bbb74 100644 --- a/src/cmd/go/go_unix_test.go +++ b/src/cmd/go/go_unix_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package main_test diff --git a/src/cmd/go/internal/base/path.go b/src/cmd/go/internal/base/path.go index 7a51181c973..4d8715ef5fe 100644 --- a/src/cmd/go/internal/base/path.go +++ b/src/cmd/go/internal/base/path.go @@ -8,21 +8,27 @@ import ( "os" "path/filepath" "strings" + "sync" ) -func getwd() string { - wd, err := os.Getwd() - if err != nil { - Fatalf("cannot determine current directory: %v", err) - } - return wd -} +var cwd string +var cwdOnce sync.Once -var Cwd = getwd() +// Cwd returns the current working directory at the time of the first call. +func Cwd() string { + cwdOnce.Do(func() { + var err error + cwd, err = os.Getwd() + if err != nil { + Fatalf("cannot determine current directory: %v", err) + } + }) + return cwd +} // ShortPath returns an absolute or relative name for path, whatever is shorter. func ShortPath(path string) string { - if rel, err := filepath.Rel(Cwd, path); err == nil && len(rel) < len(path) { + if rel, err := filepath.Rel(Cwd(), path); err == nil && len(rel) < len(path) { return rel } return path @@ -32,10 +38,8 @@ func ShortPath(path string) string { // made relative to the current directory if they would be shorter. func RelPaths(paths []string) []string { var out []string - // TODO(rsc): Can this use Cwd from above? 
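The base/path.go change beginning above (and continuing below) replaces an eagerly initialized Cwd variable with a sync.Once-guarded accessor, which is what lets the new TestExecInDeletedDir pass: os.Getwd is no longer consulted at startup for commands that never need it. The same pattern in isolation, as a sketch:

```go
package main

import (
	"fmt"
	"os"
	"sync"
)

var (
	cwd     string
	cwdOnce sync.Once
)

// cwdLazy mirrors the Cwd() accessor introduced above: the working
// directory is resolved once, on first use, and cached thereafter.
func cwdLazy() string {
	cwdOnce.Do(func() {
		var err error
		cwd, err = os.Getwd()
		if err != nil {
			panic(err) // the real code reports this via base.Fatalf
		}
	})
	return cwd
}

func main() {
	fmt.Println(cwdLazy())
	fmt.Println(cwdLazy()) // second call reuses the cached value
}
```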
- pwd, _ := os.Getwd() for _, p := range paths { - rel, err := filepath.Rel(pwd, p) + rel, err := filepath.Rel(Cwd(), p) if err == nil && len(rel) < len(p) { p = rel } diff --git a/src/cmd/go/internal/base/signal_notunix.go b/src/cmd/go/internal/base/signal_notunix.go index 9e869b03ea8..5cc0b0f1011 100644 --- a/src/cmd/go/internal/base/signal_notunix.go +++ b/src/cmd/go/internal/base/signal_notunix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 || windows // +build plan9 windows package base diff --git a/src/cmd/go/internal/base/signal_unix.go b/src/cmd/go/internal/base/signal_unix.go index 342775a1182..cdc2658372e 100644 --- a/src/cmd/go/internal/base/signal_unix.go +++ b/src/cmd/go/internal/base/signal_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || js || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd js linux netbsd openbsd solaris package base diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go index 4aa08b4ff6e..307527c695c 100644 --- a/src/cmd/go/internal/bug/bug.go +++ b/src/cmd/go/internal/bug/bug.go @@ -20,6 +20,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" + "cmd/go/internal/envcmd" "cmd/go/internal/web" ) @@ -81,7 +82,7 @@ func printGoVersion(w io.Writer) { fmt.Fprintf(w, "### What version of Go are you using (`go version`)?\n\n") fmt.Fprintf(w, "
    \n")
     	fmt.Fprintf(w, "$ go version\n")
    -	printCmdOut(w, "", "go", "version")
    +	fmt.Fprintf(w, "go version %s %s/%s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH)
     	fmt.Fprintf(w, "
    \n") fmt.Fprintf(w, "\n") } @@ -90,13 +91,20 @@ func printEnvDetails(w io.Writer) { fmt.Fprintf(w, "### What operating system and processor architecture are you using (`go env`)?\n\n") fmt.Fprintf(w, "
    go env Output
    \n")
     	fmt.Fprintf(w, "$ go env\n")
    -	printCmdOut(w, "", "go", "env")
    +	printGoEnv(w)
     	printGoDetails(w)
     	printOSDetails(w)
     	printCDetails(w)
     	fmt.Fprintf(w, "
    \n\n") } +func printGoEnv(w io.Writer) { + env := envcmd.MkEnv() + env = append(env, envcmd.ExtraEnvVars()...) + env = append(env, envcmd.ExtraEnvVarsCostly()...) + envcmd.PrintEnv(w, env) +} + func printGoDetails(w io.Writer) { printCmdOut(w, "GOROOT/bin/go version: ", filepath.Join(runtime.GOROOT(), "bin/go"), "version") printCmdOut(w, "GOROOT/bin/go tool compile -V: ", filepath.Join(runtime.GOROOT(), "bin/go"), "tool", "compile", "-V") diff --git a/src/cmd/go/internal/cache/cache.go b/src/cmd/go/internal/cache/cache.go index 41f921641d4..d592d704978 100644 --- a/src/cmd/go/internal/cache/cache.go +++ b/src/cmd/go/internal/cache/cache.go @@ -19,7 +19,7 @@ import ( "strings" "time" - "cmd/go/internal/renameio" + "cmd/go/internal/lockedfile" ) // An ActionID is a cache action key, the hash of a complete description of a @@ -294,10 +294,17 @@ func (c *Cache) Trim() { // We maintain in dir/trim.txt the time of the last completed cache trim. // If the cache has been trimmed recently enough, do nothing. // This is the common case. - data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt")) - t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) - if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval { - return + // If the trim file is corrupt, detected if the file can't be parsed, or the + // trim time is too far in the future, attempt the trim anyway. It's possible that + // the cache was full when the corruption happened. Attempting a trim on + // an empty cache is cheap, so there wouldn't be a big performance hit in that case. + if data, err := lockedfile.Read(filepath.Join(c.dir, "trim.txt")); err == nil { + if t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64); err == nil { + lastTrim := time.Unix(t, 0) + if d := now.Sub(lastTrim); d < trimInterval && d > -mtimeInterval { + return + } + } } // Trim each of the 256 subdirectories. @@ -311,7 +318,11 @@ func (c *Cache) Trim() { // Ignore errors from here: if we don't write the complete timestamp, the // cache will appear older than it is, and we'll trim it again next time. - renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) + var b bytes.Buffer + fmt.Fprintf(&b, "%d", now.Unix()) + if err := lockedfile.Write(filepath.Join(c.dir, "trim.txt"), &b, 0666); err != nil { + return + } } // trimSubdir trims a single cache subdirectory. diff --git a/src/cmd/go/internal/cache/hash.go b/src/cmd/go/internal/cache/hash.go index e4bb2a34bb4..4f79c315002 100644 --- a/src/cmd/go/internal/cache/hash.go +++ b/src/cmd/go/internal/cache/hash.go @@ -12,6 +12,7 @@ import ( "io" "os" "runtime" + "strings" "sync" ) @@ -36,7 +37,22 @@ type Hash struct { // of other versions. This salt will result in additional ActionID files // in the cache, but not additional copies of the large output files, // which are still addressed by unsalted SHA256. -var hashSalt = []byte(runtime.Version()) +// +// We strip any GOEXPERIMENTs the go tool was built with from this +// version string on the assumption that they shouldn't affect go tool +// execution. This allows bootstrapping to converge faster: dist builds +// go_bootstrap without any experiments, so by stripping experiments +// go_bootstrap and the final go binary will use the same salt. +var hashSalt = []byte(stripExperiment(runtime.Version())) + +// stripExperiment strips any GOEXPERIMENT configuration from the Go +// version string. 
+func stripExperiment(version string) string { + if i := strings.Index(version, " X:"); i >= 0 { + return version[:i] + } + return version +} // Subkey returns an action ID corresponding to mixing a parent // action ID with a string description of the subkey. diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index 322247962f8..b47eb812b59 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -10,6 +10,7 @@ import ( "bytes" "fmt" "go/build" + "internal/buildcfg" "internal/cfg" "io" "os" @@ -19,8 +20,6 @@ import ( "sync" "cmd/go/internal/fsys" - - "cmd/internal/objabi" ) // These are general "build flags" used by build and other commands. @@ -51,8 +50,6 @@ var ( ModCacheRW bool // -modcacherw flag ModFile string // -modfile flag - Insecure bool // -insecure flag - CmdName string // "build", "install", "list", "mod tidy", etc. DebugActiongraph string // -debug-actiongraph flag (undocumented, unstable) @@ -254,12 +251,12 @@ var ( GOMODCACHE = envOr("GOMODCACHE", gopathDir("pkg/mod")) // Used in envcmd.MkEnv and build ID computations. - GOARM = envOr("GOARM", fmt.Sprint(objabi.GOARM)) - GO386 = envOr("GO386", objabi.GO386) - GOMIPS = envOr("GOMIPS", objabi.GOMIPS) - GOMIPS64 = envOr("GOMIPS64", objabi.GOMIPS64) - GOPPC64 = envOr("GOPPC64", fmt.Sprintf("%s%d", "power", objabi.GOPPC64)) - GOWASM = envOr("GOWASM", fmt.Sprint(objabi.GOWASM)) + GOARM = envOr("GOARM", fmt.Sprint(buildcfg.GOARM)) + GO386 = envOr("GO386", buildcfg.GO386) + GOMIPS = envOr("GOMIPS", buildcfg.GOMIPS) + GOMIPS64 = envOr("GOMIPS64", buildcfg.GOMIPS64) + GOPPC64 = envOr("GOPPC64", fmt.Sprintf("%s%d", "power", buildcfg.GOPPC64)) + GOWASM = envOr("GOWASM", fmt.Sprint(buildcfg.GOWASM)) GOPROXY = envOr("GOPROXY", "https://proxy.golang.org,direct") GOSUMDB = envOr("GOSUMDB", "sum.golang.org") diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go index b1d40feb273..fd4cb205591 100644 --- a/src/cmd/go/internal/clean/clean.go +++ b/src/cmd/go/internal/clean/clean.go @@ -117,7 +117,7 @@ func runClean(ctx context.Context, cmd *base.Command, args []string) { } if cleanPkg { - for _, pkg := range load.PackagesAndErrors(ctx, args) { + for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { clean(pkg) } } diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index 6937187522b..b30c37ab27c 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -10,6 +10,7 @@ import ( "encoding/json" "fmt" "go/build" + "io" "os" "path/filepath" "runtime" @@ -199,7 +200,7 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { env := cfg.CmdEnv env = append(env, ExtraEnvVars()...) - if err := fsys.Init(base.Cwd); err != nil { + if err := fsys.Init(base.Cwd()); err != nil { base.Fatalf("go: %v", err) } @@ -347,27 +348,32 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { return } + PrintEnv(os.Stdout, env) +} + +// PrintEnv prints the environment variables to w. 
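To make the hash-salt change above concrete, here is stripExperiment exercised standalone. The ' X:' marker is the separator runtime.Version() uses when GOEXPERIMENT flags are active; the version strings are illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

// stripExperiment, copied from the hunk above.
func stripExperiment(version string) string {
	if i := strings.Index(version, " X:"); i >= 0 {
		return version[:i]
	}
	return version
}

func main() {
	fmt.Println(stripExperiment("go1.17"))          // go1.17
	fmt.Println(stripExperiment("go1.17 X:regabi")) // go1.17
}
```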
+func PrintEnv(w io.Writer, env []cfg.EnvVar) { for _, e := range env { if e.Name != "TERM" { switch runtime.GOOS { default: - fmt.Printf("%s=\"%s\"\n", e.Name, e.Value) + fmt.Fprintf(w, "%s=\"%s\"\n", e.Name, e.Value) case "plan9": if strings.IndexByte(e.Value, '\x00') < 0 { - fmt.Printf("%s='%s'\n", e.Name, strings.ReplaceAll(e.Value, "'", "''")) + fmt.Fprintf(w, "%s='%s'\n", e.Name, strings.ReplaceAll(e.Value, "'", "''")) } else { v := strings.Split(e.Value, "\x00") - fmt.Printf("%s=(", e.Name) + fmt.Fprintf(w, "%s=(", e.Name) for x, s := range v { if x > 0 { - fmt.Printf(" ") + fmt.Fprintf(w, " ") } - fmt.Printf("%s", s) + fmt.Fprintf(w, "%s", s) } - fmt.Printf(")\n") + fmt.Fprintf(w, ")\n") } case "windows": - fmt.Printf("set %s=%s\n", e.Name, e.Value) + fmt.Fprintf(w, "set %s=%s\n", e.Name, e.Value) } } } @@ -428,7 +434,7 @@ func checkEnvWrite(key, val string) error { return fmt.Errorf("GOPATH entry is relative; must be absolute path: %q", val) } // Make sure CC and CXX are absolute paths - case "CC", "CXX": + case "CC", "CXX", "GOMODCACHE": if !filepath.IsAbs(val) && val != "" && val != filepath.Base(val) { return fmt.Errorf("%s entry is relative; must be absolute path: %q", key, val) } diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go index c7588c66d3e..988d45e71cc 100644 --- a/src/cmd/go/internal/fix/fix.go +++ b/src/cmd/go/internal/fix/fix.go @@ -33,7 +33,7 @@ See also: go fmt, go vet. } func runFix(ctx context.Context, cmd *base.Command, args []string) { - pkgs := load.PackagesAndErrors(ctx, args) + pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args) w := 0 for _, pkg := range pkgs { if pkg.Error != nil { diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go index 6b98f0ccd31..8a040087539 100644 --- a/src/cmd/go/internal/fmtcmd/fmt.go +++ b/src/cmd/go/internal/fmtcmd/fmt.go @@ -65,7 +65,7 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { } }() } - for _, pkg := range load.PackagesAndErrors(ctx, args) { + for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not formatting packages in dependency modules\n") diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go index 7b06c3c7f3a..0b806027e64 100644 --- a/src/cmd/go/internal/fsys/fsys.go +++ b/src/cmd/go/internal/fsys/fsys.go @@ -44,7 +44,7 @@ func (n *node) isDeleted() bool { // TODO(matloob): encapsulate these in an io/fs-like interface var overlay map[string]*node // path -> file or directory node -var cwd string // copy of base.Cwd to avoid dependency +var cwd string // copy of base.Cwd() to avoid dependency // Canonicalize a path for looking it up in the overlay. // Important: filepath.Join(cwd, path) doesn't always produce @@ -100,7 +100,7 @@ func Init(wd string) error { } func initFromJSON(overlayJSON OverlayJSON) error { - // Canonicalize the paths in in the overlay map. + // Canonicalize the paths in the overlay map. // Use reverseCanonicalized to check for collisions: // no two 'from' paths should canonicalize to the same path. 
overlay = make(map[string]*node) diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go index a48311d51b0..80ea32b4284 100644 --- a/src/cmd/go/internal/generate/generate.go +++ b/src/cmd/go/internal/generate/generate.go @@ -161,8 +161,6 @@ func init() { } func runGenerate(ctx context.Context, cmd *base.Command, args []string) { - load.IgnoreImports = true - if generateRunFlag != "" { var err error generateRunRE, err = regexp.Compile(generateRunFlag) @@ -175,7 +173,8 @@ func runGenerate(ctx context.Context, cmd *base.Command, args []string) { // Even if the arguments are .go files, this loop suffices. printed := false - for _, pkg := range load.PackagesAndErrors(ctx, args) { + pkgOpts := load.PackageOpts{IgnoreImports: true} + for _, pkg := range load.PackagesAndErrors(ctx, pkgOpts, args) { if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n") @@ -334,6 +333,7 @@ func (g *Generator) setEnv() { "GOPACKAGE=" + g.pkg, "DOLLAR=" + "$", } + g.env = base.AppendPWD(g.env, g.dir) } // split breaks the line into words, evaluating quoted diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index 38ff3823f22..3c16dc3040f 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -26,7 +26,7 @@ import ( ) var CmdGet = &base.Command{ - UsageLine: "go get [-d] [-f] [-t] [-u] [-v] [-fix] [-insecure] [build flags] [packages]", + UsageLine: "go get [-d] [-f] [-t] [-u] [-v] [-fix] [build flags] [packages]", Short: "download and install packages and dependencies", Long: ` Get downloads the packages named by the import paths, along with their @@ -43,13 +43,6 @@ of the original. The -fix flag instructs get to run the fix tool on the downloaded packages before resolving dependencies or building the code. -The -insecure flag permits fetching from repositories and resolving -custom domains using insecure schemes such as HTTP. Use with caution. -This flag is deprecated and will be removed in a future version of go. -The GOINSECURE environment variable should be used instead, since it -provides control over which packages may be retrieved using an insecure -scheme. See 'go help environment' for details. - The -t flag instructs get to also download the packages required to build the tests for the specified packages. 
@@ -105,17 +98,17 @@ Usage: ` + CmdGet.UsageLine + ` } var ( - getD = CmdGet.Flag.Bool("d", false, "") - getF = CmdGet.Flag.Bool("f", false, "") - getT = CmdGet.Flag.Bool("t", false, "") - getU = CmdGet.Flag.Bool("u", false, "") - getFix = CmdGet.Flag.Bool("fix", false, "") + getD = CmdGet.Flag.Bool("d", false, "") + getF = CmdGet.Flag.Bool("f", false, "") + getT = CmdGet.Flag.Bool("t", false, "") + getU = CmdGet.Flag.Bool("u", false, "") + getFix = CmdGet.Flag.Bool("fix", false, "") + getInsecure = CmdGet.Flag.Bool("insecure", false, "") ) func init() { work.AddBuildFlags(CmdGet, work.OmitModFlag|work.OmitModCommonFlags) CmdGet.Run = runGet // break init loop - CmdGet.Flag.BoolVar(&cfg.Insecure, "insecure", cfg.Insecure, "") } func runGet(ctx context.Context, cmd *base.Command, args []string) { @@ -129,11 +122,11 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { if *getF && !*getU { base.Fatalf("go get: cannot use -f flag without -u") } - if cfg.Insecure { - fmt.Fprintf(os.Stderr, "go get: -insecure flag is deprecated; see 'go help get' for details\n") + if *getInsecure { + base.Fatalf("go get: -insecure flag is no longer supported; use GOINSECURE instead") } - // Disable any prompting for passwords by Git. + // Disable any prompting for passwords by Git itself. // Only has an effect for 2.3.0 or later, but avoiding // the prompt in earlier versions is just too hard. // If user has explicitly set GIT_TERMINAL_PROMPT=1, keep @@ -143,7 +136,10 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { os.Setenv("GIT_TERMINAL_PROMPT", "0") } - // Disable any ssh connection pooling by Git. + // Also disable prompting for passwords by the 'ssh' subprocess spawned by + // Git, because apparently GIT_TERMINAL_PROMPT isn't sufficient to do that. + // Adding '-o BatchMode=yes' should do the trick. + // // If a Git subprocess forks a child into the background to cache a new connection, // that child keeps stdout/stderr open. After the Git subprocess exits, // os/exec expects to be able to read from the stdout/stderr pipe @@ -157,7 +153,14 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // assume they know what they are doing and don't step on it. // But default to turning off ControlMaster. if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" { - os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no") + os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no -o BatchMode=yes") + } + + // And one more source of Git prompts: the Git Credential Manager Core for Windows. + // + // See https://github.com/microsoft/Git-Credential-Manager-Core/blob/master/docs/environment.md#gcm_interactive. + if os.Getenv("GCM_INTERACTIVE") == "" { + os.Setenv("GCM_INTERACTIVE", "never") } // Phase 1. Download/update. @@ -180,7 +183,7 @@ // everything. load.ClearPackageCache() - pkgs := load.PackagesAndErrors(ctx, args) + pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args) load.CheckPackageErrors(pkgs) // Phase 3. Install.
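The three environment defaults set above (GIT_TERMINAL_PROMPT=0, BatchMode=yes in GIT_SSH_COMMAND, and GCM_INTERACTIVE=never) all serve one goal: no subprocess of 'go get' may stop and wait for user input. A standalone sketch of the same hardening before shelling out to git (the remote URL is hypothetical):

```go
package main

import (
	"os"
	"os/exec"
)

func main() {
	// Mirror runGet's defaults, only where the user hasn't chosen otherwise.
	if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
		os.Setenv("GIT_TERMINAL_PROMPT", "0") // git: never prompt for passwords
	}
	if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
		// BatchMode stops ssh itself from prompting; ControlMaster=no avoids
		// background connection-caching children holding stdout/stderr open.
		os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no -o BatchMode=yes")
	}
	if os.Getenv("GCM_INTERACTIVE") == "" {
		os.Setenv("GCM_INTERACTIVE", "never") // Git Credential Manager Core
	}

	cmd := exec.Command("git", "ls-remote", "https://example.org/repo.git")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	_ = cmd.Run() // sketch: failures are visible on stderr
}
```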
@@ -255,9 +258,9 @@ func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) load1 := func(path string, mode int) *load.Package { if parent == nil { mode := 0 // don't do module or vendor resolution - return load.LoadImport(context.TODO(), path, base.Cwd, nil, stk, nil, mode) + return load.LoadImport(context.TODO(), load.PackageOpts{}, path, base.Cwd(), nil, stk, nil, mode) } - return load.LoadImport(context.TODO(), path, parent.Dir, parent, stk, nil, mode|load.ResolveModule) + return load.LoadImport(context.TODO(), load.PackageOpts{}, path, parent.Dir, parent, stk, nil, mode|load.ResolveModule) } p := load1(arg, mode) @@ -431,11 +434,11 @@ func downloadPackage(p *load.Package) error { } importPrefix = importPrefix[:slash] } - if err := module.CheckImportPath(importPrefix); err != nil { + if err := checkImportPath(importPrefix); err != nil { return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err) } security := web.SecureOnly - if cfg.Insecure || module.MatchPrefixPatterns(cfg.GOINSECURE, importPrefix) { + if module.MatchPrefixPatterns(cfg.GOINSECURE, importPrefix) { security = web.Insecure } @@ -591,3 +594,31 @@ func selectTag(goVersion string, tags []string) (match string) { } return "" } + +// checkImportPath is like module.CheckImportPath, but it forbids leading dots +// in path elements. This can lead to 'go get' creating .git and other VCS +// directories in places we might run VCS tools later. +func checkImportPath(path string) error { + if err := module.CheckImportPath(path); err != nil { + return err + } + checkElem := func(elem string) error { + if elem[0] == '.' { + return fmt.Errorf("malformed import path %q: leading dot in path element", path) + } + return nil + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:]); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:]); err != nil { + return err + } + return nil +} diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go index 57cee4ff96c..2f86e4195d9 100644 --- a/src/cmd/go/internal/help/helpdoc.go +++ b/src/cmd/go/internal/help/helpdoc.go @@ -251,7 +251,7 @@ For example, will result in the following requests: https://example.org/pkg/foo?go-get=1 (preferred) - http://example.org/pkg/foo?go-get=1 (fallback, only with -insecure) + http://example.org/pkg/foo?go-get=1 (fallback, only with use of correctly set GOINSECURE) If that page contains the meta tag @@ -517,9 +517,8 @@ General-purpose environment variables: Comma-separated list of glob patterns (in the syntax of Go's path.Match) of module path prefixes that should always be fetched in an insecure manner. Only applies to dependencies that are being fetched directly. - Unlike the -insecure flag on 'go get', GOINSECURE does not disable - checksum database validation. GOPRIVATE or GONOSUMDB may be used - to achieve that. + GOINSECURE does not disable checksum database validation. GOPRIVATE or + GONOSUMDB may be used to achieve that. GOOS The operating system for which to compile code. Examples are linux, darwin, windows, netbsd. 
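checkImportPath above layers one extra rule on module.CheckImportPath: no path element may start with a dot, so 'go get' cannot be tricked into creating directories like '.git' that VCS tools might later execute from. A standalone demo (requires golang.org/x/mod; example paths invented; the tail of the function is lightly condensed):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

// checkImportPath, reproduced from the hunk above.
func checkImportPath(path string) error {
	if err := module.CheckImportPath(path); err != nil {
		return err
	}
	checkElem := func(elem string) error {
		if elem[0] == '.' {
			return fmt.Errorf("malformed import path %q: leading dot in path element", path)
		}
		return nil
	}
	elemStart := 0
	for i, r := range path {
		if r == '/' {
			if err := checkElem(path[elemStart:]); err != nil {
				return err
			}
			elemStart = i + 1
		}
	}
	return checkElem(path[elemStart:])
}

func main() {
	fmt.Println(checkImportPath("example.org/user/repo")) // <nil>
	fmt.Println(checkImportPath("example.org/.git/repo")) // leading dot error
}
```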
diff --git a/src/cmd/go/internal/list/context.go b/src/cmd/go/internal/list/context.go index 68d691ebe2e..2dc63766b70 100644 --- a/src/cmd/go/internal/list/context.go +++ b/src/cmd/go/internal/list/context.go @@ -17,6 +17,7 @@ type Context struct { UseAllFiles bool `json:",omitempty"` // use files regardless of +build lines, file names Compiler string `json:",omitempty"` // compiler to assume when computing target paths BuildTags []string `json:",omitempty"` // build constraints to match in +build lines + ToolTags []string `json:",omitempty"` // toolchain-specific build constraints ReleaseTags []string `json:",omitempty"` // releases the current release is compatible with InstallSuffix string `json:",omitempty"` // suffix to use in the name of the install dir } @@ -31,6 +32,7 @@ func newContext(c *build.Context) *Context { UseAllFiles: c.UseAllFiles, Compiler: c.Compiler, BuildTags: c.BuildTags, + ToolTags: c.ToolTags, ReleaseTags: c.ReleaseTags, InstallSuffix: c.InstallSuffix, } diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index b4d82d9f8cc..53aaf311ec4 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -148,6 +148,7 @@ The template function "context" returns the build context, defined as: UseAllFiles bool // use files regardless of +build lines, file names Compiler string // compiler to assume when computing target paths BuildTags []string // build constraints to match in +build lines + ToolTags []string // toolchain-specific build constraints ReleaseTags []string // releases the current release is compatible with InstallSuffix string // suffix to use in the name of the install dir } @@ -335,7 +336,10 @@ var ( var nl = []byte{'\n'} func runList(ctx context.Context, cmd *base.Command, args []string) { - load.ModResolveTests = *listTest + if *listFmt != "" && *listJson == true { + base.Fatalf("go list -f cannot be used with -json") + } + work.BuildInit() out := newTrackingWriter(os.Stdout) defer out.w.Flush() @@ -344,7 +348,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if *listM { *listFmt = "{{.String}}" if *listVersions { - *listFmt = `{{.Path}}{{range .Versions}} {{.}}{{end}}` + *listFmt = `{{.Path}}{{range .Versions}} {{.}}{{end}}{{if .Deprecated}} (deprecated){{end}}` } } else { *listFmt = "{{.ImportPath}}" @@ -423,7 +427,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go list -m: not using modules") } - modload.LoadModFile(ctx) // Parses go.mod and sets cfg.BuildMod. + modload.LoadModFile(ctx) // Sets cfg.BuildMod as a side-effect. 
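The template change for 'go list -m -versions' in the hunk below appends a deprecation marker driven by the new Deprecated field. A sketch rendering that exact format string against invented module data shows the effect:

```go
package main

import (
	"fmt"
	"os"
	"text/template"
)

type modInfo struct {
	Path       string
	Versions   []string
	Deprecated string
}

func main() {
	// The format installed for 'go list -m -versions'.
	const format = `{{.Path}}{{range .Versions}} {{.}}{{end}}{{if .Deprecated}} (deprecated){{end}}`
	t := template.Must(template.New("m").Parse(format))
	m := modInfo{
		Path:       "example.org/mod",
		Versions:   []string{"v1.0.0", "v1.1.0"},
		Deprecated: "use example.org/mod/v2 instead",
	}
	if err := t.Execute(os.Stdout, m); err != nil {
		panic(err)
	}
	fmt.Println() // -> example.org/mod v1.0.0 v1.1.0 (deprecated)
}
```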
if cfg.BuildMod == "vendor" { const actionDisabledFormat = "go list -m: can't %s using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)" @@ -447,13 +451,29 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } } - mods := modload.ListModules(ctx, args, *listU, *listVersions, *listRetracted) + var mode modload.ListMode + if *listU { + mode |= modload.ListU | modload.ListRetracted | modload.ListDeprecated + } + if *listRetracted { + mode |= modload.ListRetracted + } + if *listVersions { + mode |= modload.ListVersions + if *listRetracted { + mode |= modload.ListRetractedVersions + } + } + mods, err := modload.ListModules(ctx, args, mode) if !*listE { for _, m := range mods { if m.Error != nil { base.Errorf("go list -m: %v", m.Error.Err) } } + if err != nil { + base.Errorf("go list -m: %v", err) + } base.ExitIfErrors() } for _, m := range mods { @@ -478,8 +498,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go list -test cannot be used with -find") } - load.IgnoreImports = *listFind - pkgs := load.PackagesAndErrors(ctx, args) + pkgOpts := load.PackageOpts{ + IgnoreImports: *listFind, + ModResolveTests: *listTest, + } + pkgs := load.PackagesAndErrors(ctx, pkgOpts, args) if !*listE { w := 0 for _, pkg := range pkgs { @@ -516,9 +539,9 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { var pmain, ptest, pxtest *load.Package var err error if *listE { - pmain, ptest, pxtest = load.TestPackagesAndErrors(ctx, p, nil) + pmain, ptest, pxtest = load.TestPackagesAndErrors(ctx, pkgOpts, p, nil) } else { - pmain, ptest, pxtest, err = load.TestPackagesFor(ctx, p, nil) + pmain, ptest, pxtest, err = load.TestPackagesFor(ctx, pkgOpts, p, nil) if err != nil { base.Errorf("can't load test package: %s", err) } @@ -605,7 +628,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { old := make(map[string]string) for _, p := range all { if p.ForTest != "" { - new := p.ImportPath + " [" + p.ForTest + ".test]" + new := p.Desc() old[new] = p.ImportPath p.ImportPath = new } @@ -679,9 +702,14 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } if len(args) > 0 { - listU := false - listVersions := false - rmods := modload.ListModules(ctx, args, listU, listVersions, *listRetracted) + var mode modload.ListMode + if *listRetracted { + mode |= modload.ListRetracted + } + rmods, err := modload.ListModules(ctx, args, mode) + if err != nil && !*listE { + base.Errorf("go list -retracted: %v", err) + } for i, arg := range args { rmod := rmods[i] for _, mod := range argToMods[arg] { diff --git a/src/cmd/go/internal/load/flag.go b/src/cmd/go/internal/load/flag.go index 7534e65f54c..440cb861344 100644 --- a/src/cmd/go/internal/load/flag.go +++ b/src/cmd/go/internal/load/flag.go @@ -34,7 +34,7 @@ type ppfValue struct { // Set is called each time the flag is encountered on the command line. func (f *PerPackageFlag) Set(v string) error { - return f.set(v, base.Cwd) + return f.set(v, base.Cwd()) } // set is the implementation of Set, taking a cwd (current working directory) for easier testing. 
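The ListModules refactor above collapses several boolean parameters into one ListMode bitmask built up with |=. A self-contained sketch of the pattern; the constant names are copied from the hunk, but their real definitions live in cmd/go/internal/modload:

```go
package main

import "fmt"

type ListMode int

const (
	ListU ListMode = 1 << iota
	ListRetracted
	ListDeprecated
	ListVersions
	ListRetractedVersions
)

func main() {
	// As in runList above: -u implies retraction and deprecation checks.
	listU, listVersions, listRetracted := true, false, true

	var mode ListMode
	if listU {
		mode |= ListU | ListRetracted | ListDeprecated
	}
	if listRetracted {
		mode |= ListRetracted
	}
	if listVersions {
		mode |= ListVersions
		if listRetracted {
			mode |= ListRetractedVersions
		}
	}
	fmt.Println(mode&ListRetracted != 0) // true
}
```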
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 8b12faf4cd2..3c7cd44ee33 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -14,6 +14,7 @@ import ( "go/build" "go/scanner" "go/token" + "internal/goroot" "io/fs" "os" "path" @@ -29,6 +30,8 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/fsys" + "cmd/go/internal/imports" + "cmd/go/internal/modfetch" "cmd/go/internal/modinfo" "cmd/go/internal/modload" "cmd/go/internal/par" @@ -37,11 +40,10 @@ import ( "cmd/go/internal/trace" "cmd/internal/sys" + "golang.org/x/mod/modfile" "golang.org/x/mod/module" ) -var IgnoreImports bool // control whether we ignore imports in packages - // A Package describes a single package found in a directory. type Package struct { PackagePublic // visible in 'go list' @@ -85,6 +87,7 @@ type PackagePublic struct { CgoFiles []string `json:",omitempty"` // .go source files that import "C" CompiledGoFiles []string `json:",omitempty"` // .go output from running cgo on CgoFiles IgnoredGoFiles []string `json:",omitempty"` // .go source files ignored due to build constraints + InvalidGoFiles []string `json:",omitempty"` // .go source files with detected problems (parse error, wrong package name, and so on) IgnoredOtherFiles []string `json:",omitempty"` // non-.go source files ignored due to build constraints CFiles []string `json:",omitempty"` // .c source files CXXFiles []string `json:",omitempty"` // .cc, .cpp and .cxx source files @@ -142,6 +145,7 @@ func (p *Package) AllFiles() []string { p.CgoFiles, // no p.CompiledGoFiles, because they are from GoFiles or generated by us p.IgnoredGoFiles, + // no p.InvalidGoFiles, because they are from GoFiles p.IgnoredOtherFiles, p.CFiles, p.CXXFiles, @@ -206,6 +210,7 @@ type PackageInternal struct { BuildInfo string // add this info to package main TestmainGo *[]byte // content for _testmain.go Embed map[string][]string // //go:embed comment mapping + OrigImportPath string // original import path before adding '_test' suffix Asmflags []string // -asmflags for this package Gcflags []string // -gcflags for this package @@ -343,7 +348,7 @@ type CoverVar struct { Var string // name of count struct } -func (p *Package) copyBuild(pp *build.Package) { +func (p *Package) copyBuild(opts PackageOpts, pp *build.Package) { p.Internal.Build = pp if pp.PkgTargetRoot != "" && cfg.BuildPkgdir != "" { @@ -368,6 +373,7 @@ func (p *Package) copyBuild(pp *build.Package) { p.GoFiles = pp.GoFiles p.CgoFiles = pp.CgoFiles p.IgnoredGoFiles = pp.IgnoredGoFiles + p.InvalidGoFiles = pp.InvalidGoFiles p.IgnoredOtherFiles = pp.IgnoredOtherFiles p.CFiles = pp.CFiles p.CXXFiles = pp.CXXFiles @@ -392,7 +398,7 @@ func (p *Package) copyBuild(pp *build.Package) { p.TestImports = pp.TestImports p.XTestGoFiles = pp.XTestGoFiles p.XTestImports = pp.XTestImports - if IgnoreImports { + if opts.IgnoreImports { p.Imports = nil p.Internal.RawImports = nil p.TestImports = nil @@ -401,6 +407,7 @@ func (p *Package) copyBuild(pp *build.Package) { p.EmbedPatterns = pp.EmbedPatterns p.TestEmbedPatterns = pp.TestEmbedPatterns p.XTestEmbedPatterns = pp.XTestEmbedPatterns + p.Internal.OrigImportPath = pp.ImportPath } // A PackageError describes an error loading information about a package. 
@@ -476,8 +483,10 @@ type ImportPathError interface { var ( _ ImportPathError = (*importError)(nil) + _ ImportPathError = (*mainPackageError)(nil) _ ImportPathError = (*modload.ImportMissingError)(nil) _ ImportPathError = (*modload.ImportMissingSumError)(nil) + _ ImportPathError = (*modload.DirectImportFromImplicitDependencyError)(nil) ) type importError struct { @@ -597,7 +606,7 @@ func ReloadPackageNoFlags(arg string, stk *ImportStack) *Package { }) packageDataCache.Delete(p.ImportPath) } - return LoadImport(context.TODO(), arg, base.Cwd, nil, stk, nil, 0) + return LoadImport(context.TODO(), PackageOpts{}, arg, base.Cwd(), nil, stk, nil, 0) } // dirToImportPath returns the pseudo-import path we use for a package @@ -649,11 +658,11 @@ const ( // LoadImport does not set tool flags and should only be used by // this package, as part of a bigger load operation, and by GOPATH-based "go get". // TODO(rsc): When GOPATH-based "go get" is removed, unexport this function. -func LoadImport(ctx context.Context, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) *Package { - return loadImport(ctx, nil, path, srcDir, parent, stk, importPos, mode) +func LoadImport(ctx context.Context, opts PackageOpts, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) *Package { + return loadImport(ctx, opts, nil, path, srcDir, parent, stk, importPos, mode) } -func loadImport(ctx context.Context, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) *Package { +func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) *Package { if path == "" { panic("LoadImport called with empty package path") } @@ -665,25 +674,30 @@ func loadImport(ctx context.Context, pre *preload, path, srcDir string, parent * parentRoot = parent.Root parentIsStd = parent.Standard } - bp, loaded, err := loadPackageData(path, parentPath, srcDir, parentRoot, parentIsStd, mode) - if loaded && pre != nil && !IgnoreImports { - pre.preloadImports(bp.Imports, bp) + bp, loaded, err := loadPackageData(ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode) + if loaded && pre != nil && !opts.IgnoreImports { + pre.preloadImports(ctx, opts, bp.Imports, bp) } if bp == nil { + p := &Package{ + PackagePublic: PackagePublic{ + ImportPath: path, + Incomplete: true, + }, + } if importErr, ok := err.(ImportPathError); !ok || importErr.ImportPath() != path { - // Only add path to the error's import stack if it's not already present on the error. + // Only add path to the error's import stack if it's not already present + // in the error. + // + // TODO(bcmills): setLoadPackageDataError itself has a similar Push / Pop + // sequence that empirically doesn't trigger for these errors, guarded by + // a somewhat complex condition. Figure out how to generalize that + // condition and eliminate the explicit calls here. stk.Push(path) defer stk.Pop() } - return &Package{ - PackagePublic: PackagePublic{ - ImportPath: path, - Error: &PackageError{ - ImportStack: stk.Copy(), - Err: err, - }, - }, - } + p.setLoadPackageDataError(err, path, stk, nil) + return p } importPath := bp.ImportPath @@ -701,7 +715,7 @@ func loadImport(ctx context.Context, pre *preload, path, srcDir string, parent * // Load package. // loadPackageData may return bp != nil even if an error occurs, // in order to return partial information. 
- p.load(ctx, path, stk, importPos, bp, err) + p.load(ctx, opts, path, stk, importPos, bp, err) if !cfg.ModulesEnabled && path != cleanImport(path) { p.Error = &PackageError{ @@ -714,7 +728,7 @@ func loadImport(ctx context.Context, pre *preload, path, srcDir string, parent * } // Checked on every import because the rules depend on the code doing the importing. - if perr := disallowInternal(srcDir, parent, parentPath, p, stk); perr != p { + if perr := disallowInternal(ctx, srcDir, parent, parentPath, p, stk); perr != p { perr.Error.setPos(importPos) return perr } @@ -763,7 +777,7 @@ func loadImport(ctx context.Context, pre *preload, path, srcDir string, parent * // // loadPackageData returns a boolean, loaded, which is true if this is the // first time the package was loaded. Callers may preload imports in this case. -func loadPackageData(path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { +func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { if path == "" { panic("loadPackageData called with empty package path") } @@ -836,10 +850,31 @@ func loadPackageData(path, parentPath, parentDir, parentRoot string, parentIsStd } data.p, data.err = cfg.BuildContext.ImportDir(r.dir, buildMode) if data.p.Root == "" && cfg.ModulesEnabled { - if info := modload.PackageModuleInfo(path); info != nil { + if info := modload.PackageModuleInfo(ctx, path); info != nil { data.p.Root = info.Dir } } + if r.err != nil { + if data.err != nil { + // ImportDir gave us one error, and the module loader gave us another. + // We arbitrarily choose to keep the error from ImportDir because + // that's what our tests already expect, and it seems to provide a bit + // more detail in most cases. + } else if errors.Is(r.err, imports.ErrNoGo) { + // ImportDir said there were files in the package, but the module + // loader said there weren't. Which one is right? + // Without this special-case hack, the TestScript/test_vet case fails + // on the vetfail/p1 package (added in CL 83955). + // Apparently, imports.ShouldBuild biases toward rejecting files + // with invalid build constraints, whereas ImportDir biases toward + // accepting them. + // + // TODO(#41410: Figure out how this actually ought to work and fix + // this mess. + } else { + data.err = r.err + } + } } else if r.err != nil { data.p = new(build.Package) data.err = r.err @@ -950,7 +985,7 @@ func newPreload() *preload { // preloadMatches loads data for package paths matched by patterns. // When preloadMatches returns, some packages may not be loaded yet, but // loadPackageData and loadImport are always safe to call. 
-func (pre *preload) preloadMatches(matches []*search.Match) { +func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matches []*search.Match) { for _, m := range matches { for _, pkg := range m.Pkgs { select { @@ -959,10 +994,10 @@ func (pre *preload) preloadMatches(matches []*search.Match) { case pre.sema <- struct{}{}: go func(pkg string) { mode := 0 // don't use vendoring or module import resolution - bp, loaded, err := loadPackageData(pkg, "", base.Cwd, "", false, mode) + bp, loaded, err := loadPackageData(ctx, pkg, "", base.Cwd(), "", false, mode) <-pre.sema - if bp != nil && loaded && err == nil && !IgnoreImports { - pre.preloadImports(bp.Imports, bp) + if bp != nil && loaded && err == nil && !opts.IgnoreImports { + pre.preloadImports(ctx, opts, bp.Imports, bp) } }(pkg) } @@ -973,7 +1008,7 @@ func (pre *preload) preloadMatches(matches []*search.Match) { // preloadImports queues a list of imports for preloading. // When preloadImports returns, some packages may not be loaded yet, // but loadPackageData and loadImport are always safe to call. -func (pre *preload) preloadImports(imports []string, parent *build.Package) { +func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) { parentIsStd := parent.Goroot && parent.ImportPath != "" && search.IsStandardImportPath(parent.ImportPath) for _, path := range imports { if path == "C" || path == "unsafe" { @@ -984,10 +1019,10 @@ func (pre *preload) preloadImports(imports []string, parent *build.Package) { return case pre.sema <- struct{}{}: go func(path string) { - bp, loaded, err := loadPackageData(path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) + bp, loaded, err := loadPackageData(ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) <-pre.sema - if bp != nil && loaded && err == nil && !IgnoreImports { - pre.preloadImports(bp.Imports, bp) + if bp != nil && loaded && err == nil && !opts.IgnoreImports { + pre.preloadImports(ctx, opts, bp.Imports, bp) } }(path) } @@ -1323,6 +1358,11 @@ func reusePackage(p *Package, stk *ImportStack) *Package { Err: errors.New("import cycle not allowed"), IsImportCycle: true, } + } else if !p.Error.IsImportCycle { + // If the error is already set, but it does not indicate that + // we are in an import cycle, set IsImportCycle so that we don't + // end up stuck in a loop down the road. + p.Error.IsImportCycle = true } p.Incomplete = true } @@ -1338,7 +1378,7 @@ func reusePackage(p *Package, stk *ImportStack) *Package { // is allowed to import p. // If the import is allowed, disallowInternal returns the original package p. // If not, it returns a new package containing just an appropriate error. -func disallowInternal(srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *Package { +func disallowInternal(ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *Package { // golang.org/s/go14internal: // An import of a path containing the element “internal” // is disallowed if the importing code is outside the tree @@ -1410,7 +1450,7 @@ func disallowInternal(srcDir string, importer *Package, importerPath string, p * // directory containing them. // If the directory is outside the main module, this will resolve to ".", // which is not a prefix of any valid module. 
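preloadMatches and preloadImports above bound their fan-out with pre.sema, a buffered channel used as a counting semaphore: sending acquires a slot before a goroutine is spawned, and receiving releases it when the load finishes (the real code additionally selects on a cancel channel, omitted here). The pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	sema := make(chan struct{}, 4) // like pre.sema: at most 4 loads in flight
	var wg sync.WaitGroup

	load := func(path string) { fmt.Println("loading", path) }

	for _, path := range []string{"a", "b", "c", "d", "e", "f"} {
		sema <- struct{}{} // acquire a slot; blocks when the limit is reached
		wg.Add(1)
		go func(path string) {
			defer wg.Done()
			load(path)
			<-sema // release the slot for the next preload
		}(path)
	}
	wg.Wait()
}
```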
- importerPath = modload.DirImportPath(importer.Dir) + importerPath = modload.DirImportPath(ctx, importer.Dir) } parentOfInternal := p.ImportPath[:i] if str.HasPathPrefix(importerPath, parentOfInternal) { @@ -1632,8 +1672,8 @@ func (p *Package) DefaultExecName() string { // load populates p using information from bp, err, which should // be the result of calling build.Context.Import. // stk contains the import stack, not including path itself. -func (p *Package) load(ctx context.Context, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { - p.copyBuild(bp) +func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { + p.copyBuild(opts, bp) // The localPrefix is the path we interpret ./ imports relative to. // Synthesized main packages sometimes override this. @@ -1809,6 +1849,14 @@ func (p *Package) load(ctx context.Context, path string, stk *ImportStack, impor stk.Push(path) defer stk.Pop() + pkgPath := p.ImportPath + if p.Internal.CmdlineFiles { + pkgPath = "command-line-arguments" + } + if cfg.ModulesEnabled { + p.Module = modload.PackageModuleInfo(ctx, pkgPath) + } + p.EmbedFiles, p.Internal.Embed, err = resolveEmbed(p.Dir, p.EmbedPatterns) if err != nil { p.Incomplete = true @@ -1852,7 +1900,7 @@ func (p *Package) load(ctx context.Context, path string, stk *ImportStack, impor if path == "C" { continue } - p1 := LoadImport(ctx, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) + p1 := LoadImport(ctx, opts, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) path = p1.ImportPath importPaths[i] = path @@ -1868,6 +1916,10 @@ func (p *Package) load(ctx context.Context, path string, stk *ImportStack, impor p.Internal.Imports = imports p.collectDeps() + if cfg.ModulesEnabled && p.Error == nil && p.Name == "main" && len(p.DepsErrors) == 0 { + p.Internal.BuildInfo = modload.PackageBuildInfo(pkgPath, p.Deps) + } + // unsafe is a fake package. if p.Standard && (p.ImportPath == "unsafe" || cfg.BuildContext.Compiler == "gccgo") { p.Target = "" @@ -1907,17 +1959,6 @@ func (p *Package) load(ctx context.Context, path string, stk *ImportStack, impor setError(fmt.Errorf("Fortran source files not allowed when not using cgo or SWIG: %s", strings.Join(p.FFiles, " "))) return } - - if cfg.ModulesEnabled && p.Error == nil { - mainPath := p.ImportPath - if p.Internal.CmdlineFiles { - mainPath = "command-line-arguments" - } - p.Module = modload.PackageModuleInfo(mainPath) - if p.Name == "main" && len(p.DepsErrors) == 0 { - p.Internal.BuildInfo = modload.PackageBuildInfo(mainPath, p.Deps) - } - } } // An EmbedError indicates a problem with a go:embed directive. @@ -2299,7 +2340,7 @@ func PackageList(roots []*Package) []*Package { // TestPackageList returns the list of packages in the dag rooted at roots // as visited in a depth-first post-order traversal, including the test // imports of the roots. This ignores errors in test packages. 
-func TestPackageList(ctx context.Context, roots []*Package) []*Package { +func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) []*Package { seen := map[*Package]bool{} all := []*Package{} var walk func(*Package) @@ -2315,7 +2356,7 @@ } walkTest := func(root *Package, path string) { var stk ImportStack - p1 := LoadImport(ctx, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport) + p1 := LoadImport(ctx, opts, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport) if p1.Error == nil { walk(p1) } @@ -2338,22 +2379,33 @@ // TODO(jayconrod): delete this function and set flags automatically // in LoadImport instead. func LoadImportWithFlags(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) *Package { - p := LoadImport(context.TODO(), path, srcDir, parent, stk, importPos, mode) + p := LoadImport(context.TODO(), PackageOpts{}, path, srcDir, parent, stk, importPos, mode) setToolFlags(p) return p } -// ModResolveTests indicates whether calls to the module loader should also -// resolve test dependencies of the requested packages. -// -// If ModResolveTests is true, then the module loader needs to resolve test -// dependencies at the same time as packages; otherwise, the test dependencies -// of those packages could be missing, and resolving those missing dependencies -// could change the selected versions of modules that provide other packages. -// -// TODO(#40775): Change this from a global variable to an explicit function -// argument where needed. -var ModResolveTests bool +// PackageOpts control the behavior of PackagesAndErrors and other package +// loading functions. +type PackageOpts struct { + // IgnoreImports controls whether we ignore imports when loading packages. + IgnoreImports bool + + // ModResolveTests indicates whether calls to the module loader should also + // resolve test dependencies of the requested packages. + // + // If ModResolveTests is true, then the module loader needs to resolve test + // dependencies at the same time as packages; otherwise, the test dependencies + // of those packages could be missing, and resolving those missing dependencies + // could change the selected versions of modules that provide other packages. + ModResolveTests bool + + // MainOnly is true if the caller only wants to load main packages. + // For a literal argument matching a non-main package, a stub may be returned + // with an error. For a non-literal argument (with "..."), non-main packages + // are not matched, and their dependencies may not be loaded. A warning + // may be printed for non-literal arguments that match no main packages. + MainOnly bool +} // PackagesAndErrors returns the packages named by the command line arguments // 'patterns'. If a named package cannot be loaded, PackagesAndErrors returns @@ -2363,7 +2415,7 @@ var ModResolveTests bool // // To obtain a flat list of packages, use PackageList. // To report errors loading packages, use ReportPackageErrors.
-func PackagesAndErrors(ctx context.Context, patterns []string) []*Package { +func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) []*Package { ctx, span := trace.StartSpan(ctx, "load.PackagesAndErrors") defer span.Done() @@ -2375,19 +2427,19 @@ func PackagesAndErrors(ctx context.Context, patterns []string) []*Package { // We need to test whether the path is an actual Go file and not a // package path or pattern ending in '.go' (see golang.org/issue/34653). if fi, err := fsys.Stat(p); err == nil && !fi.IsDir() { - return []*Package{GoFilesPackage(ctx, patterns)} + return []*Package{GoFilesPackage(ctx, opts, patterns)} } } } var matches []*search.Match if modload.Init(); cfg.ModulesEnabled { - loadOpts := modload.PackageOpts{ + modOpts := modload.PackageOpts{ ResolveMissingImports: true, - LoadTests: ModResolveTests, - SilenceErrors: true, + LoadTests: opts.ModResolveTests, + SilencePackageErrors: true, } - matches, _ = modload.LoadPackages(ctx, loadOpts, patterns...) + matches, _ = modload.LoadPackages(ctx, modOpts, patterns...) } else { matches = search.ImportPaths(patterns) } @@ -2400,14 +2452,14 @@ func PackagesAndErrors(ctx context.Context, patterns []string) []*Package { pre := newPreload() defer pre.flush() - pre.preloadMatches(matches) + pre.preloadMatches(ctx, opts, matches) for _, m := range matches { for _, pkg := range m.Pkgs { if pkg == "" { panic(fmt.Sprintf("ImportPaths returned empty package for pattern %s", m.Pattern())) } - p := loadImport(ctx, pre, pkg, base.Cwd, nil, &stk, nil, 0) + p := loadImport(ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, 0) p.Match = append(p.Match, m.Pattern()) p.Internal.CmdlinePkg = true if m.IsLiteral() { @@ -2443,6 +2495,10 @@ func PackagesAndErrors(ctx context.Context, patterns []string) []*Package { } } + if opts.MainOnly { + pkgs = mainPackagesOnly(pkgs, matches) + } + // Now that CmdlinePkg is set correctly, // compute the effective flags for all loaded packages // (not just the ones matching the patterns but also @@ -2491,6 +2547,80 @@ func CheckPackageErrors(pkgs []*Package) { base.ExitIfErrors() } +// mainPackagesOnly filters out non-main packages matched only by arguments +// containing "..." and returns the remaining main packages. +// +// Packages with missing, invalid, or ambiguous names may be treated as +// possibly-main packages. +// +// mainPackagesOnly sets a non-main package's Error field and returns it if it +// is named by a literal argument. +// +// mainPackagesOnly prints warnings for non-literal arguments that only match +// non-main packages. +func mainPackagesOnly(pkgs []*Package, matches []*search.Match) []*Package { + treatAsMain := map[string]bool{} + for _, m := range matches { + if m.IsLiteral() { + for _, path := range m.Pkgs { + treatAsMain[path] = true + } + } + } + + var mains []*Package + for _, pkg := range pkgs { + if pkg.Name == "main" { + treatAsMain[pkg.ImportPath] = true + mains = append(mains, pkg) + continue + } + + if len(pkg.InvalidGoFiles) > 0 { // TODO(#45999): && pkg.Name == "", but currently go/build sets pkg.Name arbitrarily if it is ambiguous. + // The package has (or may have) conflicting names, and we can't easily + // tell whether one of them is "main". So assume that it could be, and + // report an error for the package. 
+ treatAsMain[pkg.ImportPath] = true + } + if treatAsMain[pkg.ImportPath] { + if pkg.Error == nil { + pkg.Error = &PackageError{Err: &mainPackageError{importPath: pkg.ImportPath}} + } + mains = append(mains, pkg) + } + } + + for _, m := range matches { + if m.IsLiteral() || len(m.Pkgs) == 0 { + continue + } + foundMain := false + for _, path := range m.Pkgs { + if treatAsMain[path] { + foundMain = true + break + } + } + if !foundMain { + fmt.Fprintf(os.Stderr, "go: warning: %q matched only non-main packages\n", m.Pattern()) + } + } + + return mains +} + +type mainPackageError struct { + importPath string +} + +func (e *mainPackageError) Error() string { + return fmt.Sprintf("package %s is not a main package", e.importPath) +} + +func (e *mainPackageError) ImportPath() string { + return e.importPath +} + func setToolFlags(pkgs ...*Package) { for _, p := range PackageList(pkgs) { p.Internal.Asmflags = BuildAsmflags.For(p) @@ -2503,7 +2633,7 @@ func setToolFlags(pkgs ...*Package) { // GoFilesPackage creates a package for building a collection of Go files // (typically named on the command line). The target is named p.a for // package p or named after the first Go file for package main. -func GoFilesPackage(ctx context.Context, gofiles []string) *Package { +func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Package { modload.Init() for _, f := range gofiles { @@ -2556,7 +2686,7 @@ func GoFilesPackage(ctx context.Context, gofiles []string) *Package { var err error if dir == "" { - dir = base.Cwd + dir = base.Cwd() } dir, err = filepath.Abs(dir) if err != nil { @@ -2567,7 +2697,7 @@ func GoFilesPackage(ctx context.Context, gofiles []string) *Package { pkg := new(Package) pkg.Internal.Local = true pkg.Internal.CmdlineFiles = true - pkg.load(ctx, "command-line-arguments", &stk, nil, bp, err) + pkg.load(ctx, opts, "command-line-arguments", &stk, nil, bp, err) pkg.Internal.LocalPrefix = dirToImportPath(dir) pkg.ImportPath = "command-line-arguments" pkg.Target = "" @@ -2583,7 +2713,138 @@ func GoFilesPackage(ctx context.Context, gofiles []string) *Package { } } + if opts.MainOnly && pkg.Name != "main" && pkg.Error == nil { + pkg.Error = &PackageError{Err: &mainPackageError{importPath: pkg.ImportPath}} + } setToolFlags(pkg) return pkg } + +// PackagesAndErrorsOutsideModule is like PackagesAndErrors but runs in +// module-aware mode and ignores the go.mod file in the current directory or any +// parent directory, if there is one. This is used in the implementation of 'go +// install pkg@version' and other commands that support similar forms. +// +// modload.ForceUseModules must be true, and modload.RootMode must be NoRoot +// before calling this function. +// +// PackagesAndErrorsOutsideModule imposes several constraints to avoid +// ambiguity. All arguments must have the same version suffix (not just a suffix +// that resolves to the same version). They must refer to packages in the same +// module, which must not be std or cmd. That module is not considered the main +// module, but its go.mod file (if it has one) must not contain directives that +// would cause it to be interpreted differently if it were the main module +// (replace, exclude). 
+func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) { + if !modload.ForceUseModules { + panic("modload.ForceUseModules must be true") + } + if modload.RootMode != modload.NoRoot { + panic("modload.RootMode must be NoRoot") + } + + // Check that the arguments satisfy syntactic constraints. + var version string + for _, arg := range args { + if i := strings.Index(arg, "@"); i >= 0 { + version = arg[i+1:] + if version == "" { + return nil, fmt.Errorf("%s: version must not be empty", arg) + } + break + } + } + patterns := make([]string, len(args)) + for i, arg := range args { + if !strings.HasSuffix(arg, "@"+version) { + return nil, fmt.Errorf("%s: all arguments must have the same version (@%s)", arg, version) + } + p := arg[:len(arg)-len(version)-1] + switch { + case build.IsLocalImport(p): + return nil, fmt.Errorf("%s: argument must be a package path, not a relative path", arg) + case filepath.IsAbs(p): + return nil, fmt.Errorf("%s: argument must be a package path, not an absolute path", arg) + case search.IsMetaPackage(p): + return nil, fmt.Errorf("%s: argument must be a package path, not a meta-package", arg) + case path.Clean(p) != p: + return nil, fmt.Errorf("%s: argument must be a clean package path", arg) + case !strings.Contains(p, "...") && search.IsStandardImportPath(p) && goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, p): + return nil, fmt.Errorf("%s: argument must not be a package in the standard library", arg) + default: + patterns[i] = p + } + } + + // Query the module providing the first argument, load its go.mod file, and + // check that it doesn't contain directives that would cause it to be + // interpreted differently if it were the main module. + // + // If multiple modules match the first argument, accept the longest match + // (first result). It's possible this module won't provide packages named by + // later arguments, and other modules would. Let's not try to be too + // magical though. + allowed := modload.CheckAllowed + if modload.IsRevisionQuery(version) { + // Don't check for retractions if a specific revision is requested. + allowed = nil + } + noneSelected := func(path string) (version string) { return "none" } + qrs, err := modload.QueryPackages(ctx, patterns[0], version, noneSelected, allowed) + if err != nil { + return nil, fmt.Errorf("%s: %w", args[0], err) + } + rootMod := qrs[0].Mod + data, err := modfetch.GoMod(rootMod.Path, rootMod.Version) + if err != nil { + return nil, fmt.Errorf("%s: %w", args[0], err) + } + f, err := modfile.Parse("go.mod", data, nil) + if err != nil { + return nil, fmt.Errorf("%s (in %s): %w", args[0], rootMod, err) + } + directiveFmt := "%s (in %s):\n" + + "\tThe go.mod file for the module providing named packages contains one or\n" + + "\tmore %s directives. It must not contain directives that would cause\n" + + "\tit to be interpreted differently than if it were the main module." + if len(f.Replace) > 0 { + return nil, fmt.Errorf(directiveFmt, args[0], rootMod, "replace") + } + if len(f.Exclude) > 0 { + return nil, fmt.Errorf(directiveFmt, args[0], rootMod, "exclude") + } + + // Since we are in NoRoot mode, the build list initially contains only + // the dummy command-line-arguments module. Add a requirement on the + // module that provides the packages named on the command line. 
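The go.mod screening shown earlier in this function can be reproduced with the public golang.org/x/mod/modfile API. A minimal sketch, with an invented go.mod that trips the replace check:

package main

import (
    "fmt"
    "log"

    "golang.org/x/mod/modfile"
)

func main() {
    // Invented go.mod contents containing a replace directive, which
    // 'go install pkg@version' must reject in the providing module.
    data := []byte("module example.com/m\n\ngo 1.17\n\nreplace example.com/x => ../x\n")
    f, err := modfile.Parse("go.mod", data, nil)
    if err != nil {
        log.Fatal(err)
    }
    if len(f.Replace) > 0 || len(f.Exclude) > 0 {
        fmt.Println("go.mod contains replace/exclude directives; cannot be installed by version")
    }
}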
+ if _, err := modload.EditBuildList(ctx, nil, []module.Version{rootMod}); err != nil { + return nil, fmt.Errorf("%s: %w", args[0], err) + } + + // Load packages for all arguments. + pkgs := PackagesAndErrors(ctx, opts, patterns) + + // Check that named packages are all provided by the same module. + for _, pkg := range pkgs { + var pkgErr error + if pkg.Module == nil { + // Packages in std, cmd, and their vendored dependencies + // don't have this field set. + pkgErr = fmt.Errorf("package %s not provided by module %s", pkg.ImportPath, rootMod) + } else if pkg.Module.Path != rootMod.Path || pkg.Module.Version != rootMod.Version { + pkgErr = fmt.Errorf("package %s provided by module %s@%s\n\tAll packages must be provided by the same module (%s).", pkg.ImportPath, pkg.Module.Path, pkg.Module.Version, rootMod) + } + if pkgErr != nil && pkg.Error == nil { + pkg.Error = &PackageError{Err: pkgErr} + } + } + + matchers := make([]func(string) bool, len(patterns)) + for i, p := range patterns { + if strings.Contains(p, "...") { + matchers[i] = search.MatchPattern(p) + } + } + return pkgs, nil +} diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go index eb8aef3ee28..6baa1db14f0 100644 --- a/src/cmd/go/internal/load/test.go +++ b/src/cmd/go/internal/load/test.go @@ -21,6 +21,7 @@ import ( "unicode" "unicode/utf8" + "cmd/go/internal/fsys" "cmd/go/internal/str" "cmd/go/internal/trace" ) @@ -45,8 +46,8 @@ type TestCover struct { // TestPackagesFor is like TestPackagesAndErrors but it returns // an error if the test packages or their dependencies have errors. // Only test packages without errors are returned. -func TestPackagesFor(ctx context.Context, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package, err error) { - pmain, ptest, pxtest = TestPackagesAndErrors(ctx, p, cover) +func TestPackagesFor(ctx context.Context, opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package, err error) { + pmain, ptest, pxtest = TestPackagesAndErrors(ctx, opts, p, cover) for _, p1 := range []*Package{ptest, pxtest, pmain} { if p1 == nil { // pxtest may be nil @@ -92,7 +93,7 @@ func TestPackagesFor(ctx context.Context, p *Package, cover *TestCover) (pmain, // // The caller is expected to have checked that len(p.TestGoFiles)+len(p.XTestGoFiles) > 0, // or else there's no point in any of this. -func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) { +func TestPackagesAndErrors(ctx context.Context, opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) { ctx, span := trace.StartSpan(ctx, "load.TestPackagesAndErrors") defer span.Done() @@ -100,7 +101,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p defer pre.flush() allImports := append([]string{}, p.TestImports...) allImports = append(allImports, p.XTestImports...) 
- pre.preloadImports(allImports, p.Internal.Build) + pre.preloadImports(ctx, opts, allImports, p.Internal.Build) var ptestErr, pxtestErr *PackageError var imports, ximports []*Package @@ -109,7 +110,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p stk.Push(p.ImportPath + " (test)") rawTestImports := str.StringList(p.TestImports) for i, path := range p.TestImports { - p1 := loadImport(ctx, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) + p1 := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) if str.Contains(p1.Deps, p.ImportPath) || p1.ImportPath == p.ImportPath { // Same error that loadPackage returns (via reusePackage) in pkg.go. // Can't change that code, because that code is only for loading the @@ -139,7 +140,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p pxtestNeedsPtest := false rawXTestImports := str.StringList(p.XTestImports) for i, path := range p.XTestImports { - p1 := loadImport(ctx, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) + p1 := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) if p1.ImportPath == p.ImportPath { pxtestNeedsPtest = true } else { @@ -203,6 +204,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p } ptest.Internal.Embed = testEmbed ptest.EmbedFiles = str.StringList(p.EmbedFiles, p.TestEmbedFiles) + ptest.Internal.OrigImportPath = p.Internal.OrigImportPath ptest.collectDeps() } else { ptest = p @@ -232,11 +234,12 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p Imports: ximports, RawImports: rawXTestImports, - Asmflags: p.Internal.Asmflags, - Gcflags: p.Internal.Gcflags, - Ldflags: p.Internal.Ldflags, - Gccgoflags: p.Internal.Gccgoflags, - Embed: xtestEmbed, + Asmflags: p.Internal.Asmflags, + Gcflags: p.Internal.Gcflags, + Ldflags: p.Internal.Ldflags, + Gccgoflags: p.Internal.Gccgoflags, + Embed: xtestEmbed, + OrigImportPath: p.Internal.OrigImportPath, }, } if pxtestNeedsPtest { @@ -257,12 +260,13 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p Module: p.Module, }, Internal: PackageInternal{ - Build: &build.Package{Name: "main"}, - BuildInfo: p.Internal.BuildInfo, - Asmflags: p.Internal.Asmflags, - Gcflags: p.Internal.Gcflags, - Ldflags: p.Internal.Ldflags, - Gccgoflags: p.Internal.Gccgoflags, + Build: &build.Package{Name: "main"}, + BuildInfo: p.Internal.BuildInfo, + Asmflags: p.Internal.Asmflags, + Gcflags: p.Internal.Gcflags, + Ldflags: p.Internal.Ldflags, + Gccgoflags: p.Internal.Gccgoflags, + OrigImportPath: p.Internal.OrigImportPath, }, } @@ -277,7 +281,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p if dep == ptest.ImportPath { pmain.Internal.Imports = append(pmain.Internal.Imports, ptest) } else { - p1 := loadImport(ctx, pre, dep, "", nil, &stk, nil, 0) + p1 := loadImport(ctx, opts, pre, dep, "", nil, &stk, nil, 0) pmain.Internal.Imports = append(pmain.Internal.Imports, p1) } } @@ -290,10 +294,12 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p seen[p1] = true } for _, p1 := range cover.Pkgs { - if !seen[p1] { - seen[p1] = true - pmain.Internal.Imports = append(pmain.Internal.Imports, p1) + if seen[p1] { + // Don't add duplicate imports. 
+ continue } + seen[p1] = true + pmain.Internal.Imports = append(pmain.Internal.Imports, p1) } } @@ -576,7 +582,13 @@ type testFunc struct { var testFileSet = token.NewFileSet() func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error { - f, err := parser.ParseFile(testFileSet, filename, nil, parser.ParseComments) + // Pass in the overlaid source if we have an overlay for this file. + src, err := fsys.Open(filename) + if err != nil { + return err + } + defer src.Close() + f, err := parser.ParseFile(testFileSet, filename, src, parser.ParseComments) if err != nil { return err } diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go index 1fa4327a89d..a37b2ad6d18 100644 --- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || (solaris && !illumos) // +build aix solaris,!illumos // This code implements the filelock API using POSIX 'fcntl' locks, which attach diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go index bc480343fc3..70f5d7a688a 100644 --- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !plan9 && !solaris && !windows // +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!plan9,!solaris,!windows package filelock diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_plan9.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_plan9.go index 0798ee469a4..908afb6c8cb 100644 --- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_plan9.go +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_plan9.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 // +build plan9 package filelock diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go index 2ac2052b8f5..640d4406f42 100644 --- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !js && !plan9 // +build !js,!plan9 package filelock_test diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go index ed07bac6088..878a1e770d4 100644 --- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd // +build darwin dragonfly freebsd illumos linux netbsd openbsd package filelock diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go index 19de27eb9b6..dd27ce92bd8 100644 --- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package filelock diff --git a/src/cmd/go/internal/lockedfile/lockedfile_filelock.go b/src/cmd/go/internal/lockedfile/lockedfile_filelock.go index efc66461ed2..729df5c681c 100644 --- a/src/cmd/go/internal/lockedfile/lockedfile_filelock.go +++ b/src/cmd/go/internal/lockedfile/lockedfile_filelock.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !plan9 // +build !plan9 package lockedfile diff --git a/src/cmd/go/internal/lockedfile/lockedfile_plan9.go b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go index 70d6eddf2d2..3d4b97d78e5 100644 --- a/src/cmd/go/internal/lockedfile/lockedfile_plan9.go +++ b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build plan9 // +build plan9 package lockedfile diff --git a/src/cmd/go/internal/lockedfile/lockedfile_test.go b/src/cmd/go/internal/lockedfile/lockedfile_test.go index 34327dd841e..3acc6695a74 100644 --- a/src/cmd/go/internal/lockedfile/lockedfile_test.go +++ b/src/cmd/go/internal/lockedfile/lockedfile_test.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // js does not support inter-process file locking. +//go:build !js // +build !js package lockedfile_test diff --git a/src/cmd/go/internal/lockedfile/transform_test.go b/src/cmd/go/internal/lockedfile/transform_test.go index 407d48ea4a3..b753346e7da 100644 --- a/src/cmd/go/internal/lockedfile/transform_test.go +++ b/src/cmd/go/internal/lockedfile/transform_test.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // js does not support inter-process file locking. +//go:build !js // +build !js package lockedfile_test diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index e7d3d869cbc..a6c6d914e1d 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -132,12 +132,10 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } var mods []*moduleJSON - listU := false - listVersions := false - listRetractions := false type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) - for _, info := range modload.ListModules(ctx, args, listU, listVersions, listRetractions) { + infos, infosErr := modload.ListModules(ctx, args, 0) + for _, info := range infos { if info.Replace != nil { info = info.Replace } @@ -188,5 +186,12 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } // Update go.mod and especially go.sum if needed. - modload.WriteGoMod() + modload.WriteGoMod(ctx) + + // If there was an error matching some of the requested packages, emit it now + // (after we've written the checksums for the modules that were downloaded + // successfully). 
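The filelock and lockedfile hunks above all pair a new //go:build line with the legacy // +build comment. This is the Go 1.17 transition to the new constraint syntax, during which both forms are kept and gofmt keeps them in sync. A file header in the dual style, with an illustrative constraint and package name:

// Copyright notice elided.

//go:build darwin || linux
// +build darwin linux

// Package filelock is named here only to illustrate the paired syntax;
// the //go:build expression and the +build line must select the same
// set of build configurations.
package filelock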
+ if infosErr != nil { + base.Errorf("go mod download: %v", infosErr) + } } diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go index 1df104eb1dd..79a93ca44b9 100644 --- a/src/cmd/go/internal/modcmd/edit.go +++ b/src/cmd/go/internal/modcmd/edit.go @@ -85,12 +85,12 @@ The -json flag prints the final go.mod file in JSON format instead of writing it back to go.mod. The JSON output corresponds to these Go types: type Module struct { - Path string + Path string Version string } type GoMod struct { - Module Module + Module ModPath Go string Require []Require Exclude []Module @@ -98,6 +98,11 @@ writing it back to go.mod. The JSON output corresponds to these Go types: Retract []Retract } + type ModPath struct { + Path string + Deprecated string + } + type Require struct { Path string Version string @@ -450,7 +455,7 @@ func flagDropRetract(arg string) { // fileJSON is the -json output data structure. type fileJSON struct { - Module module.Version + Module editModuleJSON Go string `json:",omitempty"` Require []requireJSON Exclude []module.Version @@ -458,6 +463,11 @@ type fileJSON struct { Retract []retractJSON } +type editModuleJSON struct { + Path string + Deprecated string `json:",omitempty"` +} + type requireJSON struct { Path string Version string `json:",omitempty"` @@ -479,7 +489,10 @@ type retractJSON struct { func editPrintJSON(modFile *modfile.File) { var f fileJSON if modFile.Module != nil { - f.Module = modFile.Module.Mod + f.Module = editModuleJSON{ + Path: modFile.Module.Mod.Path, + Deprecated: modFile.Module.Deprecated, + } } if modFile.Go != nil { f.Go = modFile.Go.Version diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index a88e9ef4557..77853304e9d 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -10,7 +10,6 @@ import ( "bufio" "context" "os" - "sort" "cmd/go/internal/base" "cmd/go/internal/modload" @@ -42,43 +41,26 @@ func runGraph(ctx context.Context, cmd *base.Command, args []string) { } modload.ForceUseModules = true modload.RootMode = modload.NeedRoot - modload.LoadAllModules(ctx) - - reqs := modload.MinReqs() - format := func(m module.Version) string { - if m.Version == "" { - return m.Path - } - return m.Path + "@" + m.Version - } - - var out []string - var deps int // index in out where deps start - seen := map[module.Version]bool{modload.Target: true} - queue := []module.Version{modload.Target} - for len(queue) > 0 { - var m module.Version - m, queue = queue[0], queue[1:] - list, _ := reqs.Required(m) - for _, r := range list { - if !seen[r] { - queue = append(queue, r) - seen[r] = true - } - out = append(out, format(m)+" "+format(r)+"\n") - } - if m == modload.Target { - deps = len(out) - } - } - - sort.Slice(out[deps:], func(i, j int) bool { - return out[deps+i][0] < out[deps+j][0] - }) + mg := modload.LoadModGraph(ctx) w := bufio.NewWriter(os.Stdout) - for _, line := range out { - w.WriteString(line) + defer w.Flush() + + format := func(m module.Version) { + w.WriteString(m.Path) + if m.Version != "" { + w.WriteString("@") + w.WriteString(m.Version) + } } - w.Flush() + + mg.WalkBreadthFirst(func(m module.Version) { + reqs, _ := mg.RequiredBy(m) + for _, r := range reqs { + format(m) + w.WriteByte(' ') + format(r) + w.WriteByte('\n') + } + }) } diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index 3b83d87a8eb..c72ec30a572 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -12,10 
+12,12 @@ import ( "cmd/go/internal/imports" "cmd/go/internal/modload" "context" + + "golang.org/x/mod/modfile" ) var cmdTidy = &base.Command{ - UsageLine: "go mod tidy [-e] [-v]", + UsageLine: "go mod tidy [-e] [-v] [-go=version]", Short: "add missing and remove unused modules", Long: ` Tidy makes sure go.mod matches the source code in the module. @@ -30,16 +32,26 @@ to standard error. The -e flag causes tidy to attempt to proceed despite errors encountered while loading packages. +The -go flag causes tidy to update the 'go' directive in the go.mod +file to the given version, which may change which module dependencies +are retained as explicit requirements in the go.mod file. +(Go versions 1.17 and higher retain more requirements in order to +support lazy module loading.) + See https://golang.org/ref/mod#go-mod-tidy for more about 'go mod tidy'. `, Run: runTidy, } -var tidyE bool // if true, report errors but proceed anyway. +var ( + tidyE bool // if true, report errors but proceed anyway. + tidyGo string // go version to write to the tidied go.mod file (toggles lazy loading) +) func init() { cmdTidy.Flag.BoolVar(&cfg.BuildV, "v", false, "") cmdTidy.Flag.BoolVar(&tidyE, "e", false, "") + cmdTidy.Flag.StringVar(&tidyGo, "go", "", "") base.AddModCommonFlags(&cmdTidy.Flag) } @@ -48,6 +60,12 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go mod tidy: no arguments allowed") } + if tidyGo != "" { + if !modfile.GoVersionRE.MatchString(tidyGo) { + base.Fatalf(`go mod: invalid -go option %q; expecting something like "-go 1.17"`, tidyGo) + } + } + // Tidy aims to make 'go test' reproducible for any package in 'all', so we // need to include test dependencies. For modules that specify go 1.15 or // earlier this is a no-op (because 'all' saturates transitive test @@ -62,13 +80,13 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { modload.RootMode = modload.NeedRoot modload.LoadPackages(ctx, modload.PackageOpts{ - Tags: imports.AnyTags(), - ResolveMissingImports: true, - LoadTests: true, - AllowErrors: tidyE, + GoVersion: tidyGo, + Tags: imports.AnyTags(), + Tidy: true, + VendorModulesInGOROOTSrc: true, + ResolveMissingImports: true, + LoadTests: true, + AllowErrors: tidyE, + SilenceMissingStdImports: true, }, "all") - - modload.TidyBuildList() - modload.TrimGoSum() - modload.WriteGoMod() } diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index d3ed9e00e22..8e1c0432f72 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -64,10 +64,12 @@ func runVendor(ctx context.Context, cmd *base.Command, args []string) { modload.RootMode = modload.NeedRoot loadOpts := modload.PackageOpts{ - Tags: imports.AnyTags(), - ResolveMissingImports: true, - UseVendorAll: true, - AllowErrors: vendorE, + Tags: imports.AnyTags(), + VendorModulesInGOROOTSrc: true, + ResolveMissingImports: true, + UseVendorAll: true, + AllowErrors: vendorE, + SilenceMissingStdImports: true, } _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") @@ -86,15 +88,23 @@ func runVendor(ctx context.Context, cmd *base.Command, args []string) { } includeAllReplacements := false + includeGoVersions := false isExplicit := map[module.Version]bool{} - if gv := modload.ModFile().Go; gv != nil && semver.Compare("v"+gv.Version, "v1.14") >= 0 { - // If the Go version is at least 1.14, annotate all explicit 'require' and - // 'replace' targets found in the go.mod file so that we can perform a - // stronger consistency 
check when -mod=vendor is set. - for _, r := range modload.ModFile().Require { - isExplicit[r.Mod] = true + if gv := modload.ModFile().Go; gv != nil { + if semver.Compare("v"+gv.Version, "v1.14") >= 0 { + // If the Go version is at least 1.14, annotate all explicit 'require' and + // 'replace' targets found in the go.mod file so that we can perform a + // stronger consistency check when -mod=vendor is set. + for _, r := range modload.ModFile().Require { + isExplicit[r.Mod] = true + } + includeAllReplacements = true + } + if semver.Compare("v"+gv.Version, "v1.17") >= 0 { + // If the Go version is at least 1.17, annotate all modules with their + // 'go' version directives. + includeGoVersions = true } - includeAllReplacements = true } var vendorMods []module.Version @@ -108,26 +118,35 @@ func runVendor(ctx context.Context, cmd *base.Command, args []string) { } module.Sort(vendorMods) - var buf bytes.Buffer + var ( + buf bytes.Buffer + w io.Writer = &buf + ) + if cfg.BuildV { + w = io.MultiWriter(&buf, os.Stderr) + } + for _, m := range vendorMods { line := moduleLine(m, modload.Replacement(m)) - buf.WriteString(line) - if cfg.BuildV { - os.Stderr.WriteString(line) + io.WriteString(w, line) + + goVersion := "" + if includeGoVersions { + goVersion = modload.ModuleInfo(ctx, m.Path).GoVersion } - if isExplicit[m] { - buf.WriteString("## explicit\n") - if cfg.BuildV { - os.Stderr.WriteString("## explicit\n") - } + switch { + case isExplicit[m] && goVersion != "": + fmt.Fprintf(w, "## explicit; go %s\n", goVersion) + case isExplicit[m]: + io.WriteString(w, "## explicit\n") + case goVersion != "": + fmt.Fprintf(w, "## go %s\n", goVersion) } + pkgs := modpkgs[m] sort.Strings(pkgs) for _, pkg := range pkgs { - fmt.Fprintf(&buf, "%s\n", pkg) - if cfg.BuildV { - fmt.Fprintf(os.Stderr, "%s\n", pkg) - } + fmt.Fprintf(w, "%s\n", pkg) vendorPkg(vdir, pkg) } } @@ -321,6 +340,15 @@ func matchPotentialSourceFile(dir string, info fs.DirEntry) bool { if strings.HasSuffix(info.Name(), "_test.go") { return false } + if info.Name() == "go.mod" || info.Name() == "go.sum" { + if gv := modload.ModFile().Go; gv != nil && semver.Compare("v"+gv.Version, "v1.17") >= 0 { + // As of Go 1.17, we strip go.mod and go.sum files from dependency modules. + // Otherwise, 'go' commands invoked within the vendor subtree may misidentify + // an arbitrary directory within the vendor tree as a module root. + // (See https://golang.org/issue/42970.) + return false + } + } if strings.HasSuffix(info.Name(), ".go") { f, err := fsys.Open(filepath.Join(dir, info.Name())) if err != nil { diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index 83214291310..5c321c783ae 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -54,7 +54,7 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) { sem := make(chan token, runtime.GOMAXPROCS(0)) // Use a slice of result channels, so that the output is deterministic. 
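The comment above describes the concurrency scheme in 'go mod verify': a token semaphore bounds the number of in-flight verifications, while one result channel per module keeps output in submission order. A minimal, self-contained sketch of that pattern; the module names and the verification body are stand-ins:

package main

import (
    "fmt"
    "runtime"
)

type token struct{}

func main() {
    mods := []string{"example.com/a@v1.0.0", "example.com/b@v1.1.0", "example.com/c@v2.0.0"}
    sem := make(chan token, runtime.GOMAXPROCS(0))
    results := make([]chan string, len(mods))
    for i := range mods {
        results[i] = make(chan string, 1)
        sem <- token{} // blocks while GOMAXPROCS verifications are in flight
        go func(i int, m string) {
            defer func() { <-sem }()
            results[i] <- m + ": ok" // stand-in for the real verification
        }(i, mods[i])
    }
    for _, ch := range results { // read in submission order: deterministic output
        fmt.Println(<-ch)
    }
}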
- mods := modload.LoadAllModules(ctx)[1:] + mods := modload.LoadModGraph(ctx).BuildList()[1:] errsChans := make([]<-chan []error, len(mods)) for i, mod := range mods { diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index a5f3e8afcbe..3b14b27c8c7 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -68,22 +68,25 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { modload.RootMode = modload.NeedRoot loadOpts := modload.PackageOpts{ - Tags: imports.AnyTags(), - LoadTests: !*whyVendor, - SilenceErrors: true, - UseVendorAll: *whyVendor, + Tags: imports.AnyTags(), + VendorModulesInGOROOTSrc: true, + LoadTests: !*whyVendor, + SilencePackageErrors: true, + UseVendorAll: *whyVendor, } if *whyM { - listU := false - listVersions := false - listRetractions := false for _, arg := range args { if strings.Contains(arg, "@") { base.Fatalf("go mod why: module query not allowed") } } - mods := modload.ListModules(ctx, args, listU, listVersions, listRetractions) + + mods, err := modload.ListModules(ctx, args, 0) + if err != nil { + base.Fatalf("go mod why: %v", err) + } + byModule := make(map[module.Version][]string) _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") for _, path := range pkgs { diff --git a/src/cmd/go/internal/modconv/convert.go b/src/cmd/go/internal/modconv/convert.go index 5d4165c9443..9c861f8e99e 100644 --- a/src/cmd/go/internal/modconv/convert.go +++ b/src/cmd/go/internal/modconv/convert.go @@ -12,7 +12,6 @@ import ( "strings" "cmd/go/internal/base" - "cmd/go/internal/modfetch" "golang.org/x/mod/modfile" "golang.org/x/mod/module" @@ -21,7 +20,7 @@ import ( // ConvertLegacyConfig converts legacy config to modfile. // The file argument is slash-delimited. -func ConvertLegacyConfig(f *modfile.File, file string, data []byte) error { +func ConvertLegacyConfig(f *modfile.File, file string, data []byte, queryPackage func(path, rev string) (module.Version, error)) error { i := strings.LastIndex(file, "/") j := -2 if i >= 0 { @@ -62,15 +61,13 @@ func ConvertLegacyConfig(f *modfile.File, file string, data []byte) error { sem <- token{} go func(i int, m module.Version) { defer func() { <-sem }() - repo, info, err := modfetch.ImportRepoRev(m.Path, m.Version) + version, err := queryPackage(m.Path, m.Version) if err != nil { fmt.Fprintf(os.Stderr, "go: converting %s: stat %s@%s: %v\n", base.ShortPath(file), m.Path, m.Version, err) return } - path := repo.ModulePath() - versions[i].Path = path - versions[i].Version = info.Version + versions[i] = version }(i, m) } // Fill semaphore channel to wait for all tasks to finish. diff --git a/src/cmd/go/internal/modconv/convert_test.go b/src/cmd/go/internal/modconv/convert_test.go deleted file mode 100644 index 66b9ff4f382..00000000000 --- a/src/cmd/go/internal/modconv/convert_test.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package modconv - -import ( - "bytes" - "context" - "fmt" - "internal/testenv" - "log" - "os" - "os/exec" - "path/filepath" - "strings" - "testing" - - "cmd/go/internal/cfg" - "cmd/go/internal/modfetch" - - "golang.org/x/mod/modfile" - "golang.org/x/mod/module" -) - -func TestMain(m *testing.M) { - os.Exit(testMain(m)) -} - -func testMain(m *testing.M) int { - cfg.GOPROXY = "direct" - - if _, err := exec.LookPath("git"); err != nil { - fmt.Fprintln(os.Stderr, "skipping because git binary not found") - fmt.Println("PASS") - return 0 - } - - dir, err := os.MkdirTemp("", "modconv-test-") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(dir) - cfg.GOMODCACHE = filepath.Join(dir, "pkg/mod") - - return m.Run() -} - -func TestConvertLegacyConfig(t *testing.T) { - testenv.MustHaveExternalNetwork(t) - - if testing.Verbose() { - old := cfg.BuildX - defer func() { - cfg.BuildX = old - }() - cfg.BuildX = true - } - - var tests = []struct { - path string - vers string - gomod string - }{ - /* - Different versions of git seem to find or not find - github.com/Masterminds/semver's a93e51b5a57e, - which is an unmerged pull request. - We'd rather not provide access to unmerged pull requests, - so the line is removed from the golden file here, - but some git commands still find it somehow. - - { - // Gopkg.lock parsing. - "github.com/golang/dep", "v0.4.0", - `module github.com/golang/dep - - require ( - github.com/Masterminds/vcs v1.11.1 - github.com/armon/go-radix v0.0.0-20160115234725-4239b77079c7 - github.com/boltdb/bolt v1.3.1 - github.com/go-yaml/yaml v0.0.0-20170407172122-cd8b52f8269e - github.com/golang/protobuf v0.0.0-20170901042739-5afd06f9d81a - github.com/jmank88/nuts v0.3.0 - github.com/nightlyone/lockfile v0.0.0-20170707060451-e83dc5e7bba0 - github.com/pelletier/go-toml v0.0.0-20171218135716-b8b5e7696574 - github.com/pkg/errors v0.8.0 - github.com/sdboyer/constext v0.0.0-20170321163424-836a14457353 - golang.org/x/net v0.0.0-20170828231752-66aacef3dd8a - golang.org/x/sync v0.0.0-20170517211232-f52d1811a629 - golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea - )`, - }, - */ - - // TODO: https://github.com/docker/distribution uses vendor.conf - - { - // Godeps.json parsing. - // TODO: Should v2.0.0 work here too? 
- "github.com/docker/distribution", "v0.0.0-20150410205453-85de3967aa93", - `module github.com/docker/distribution - - require ( - github.com/AdRoll/goamz v0.0.0-20150130162828-d3664b76d905 - github.com/MSOpenTech/azure-sdk-for-go v0.0.0-20150323223030-d90753bcad2e - github.com/Sirupsen/logrus v0.7.3 - github.com/bugsnag/bugsnag-go v1.0.3-0.20141110184014-b1d153021fcd - github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b - github.com/bugsnag/panicwrap v0.0.0-20141110184334-e5f9854865b9 - github.com/codegangsta/cli v1.4.2-0.20150131031259-6086d7927ec3 - github.com/docker/docker v1.4.2-0.20150204013315-165ea5c158cf - github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 - github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 - github.com/gorilla/context v0.0.0-20140604161150-14f550f51af5 - github.com/gorilla/handlers v0.0.0-20140825150757-0e84b7d810c1 - github.com/gorilla/mux v0.0.0-20140926153814-e444e69cbd2e - github.com/jlhawn/go-crypto v0.0.0-20150401213827-cd738dde20f0 - github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 - github.com/yvasiyarov/gorelic v0.0.7-0.20141212073537-a9bba5b9ab50 - github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f - golang.org/x/net v0.0.0-20150202051010-1dfe7915deaf - gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789 - gopkg.in/yaml.v2 v2.0.0-20150116202057-bef53efd0c76 - )`, - }, - - { - // golang.org/issue/24585 - confusion about v2.0.0 tag in legacy non-v2 module - "github.com/fishy/gcsbucket", "v0.0.0-20180217031846-618d60fe84e0", - `module github.com/fishy/gcsbucket - - require ( - cloud.google.com/go v0.18.0 - github.com/fishy/fsdb v0.0.0-20180217030800-5527ded01371 - github.com/golang/protobuf v1.0.0 - github.com/googleapis/gax-go v2.0.0+incompatible - golang.org/x/net v0.0.0-20180216171745-136a25c244d3 - golang.org/x/oauth2 v0.0.0-20180207181906-543e37812f10 - golang.org/x/text v0.3.1-0.20180208041248-4e4a3210bb54 - google.golang.org/api v0.0.0-20180217000815-c7a403bb5fe1 - google.golang.org/appengine v1.0.0 - google.golang.org/genproto v0.0.0-20180206005123-2b5a72b8730b - google.golang.org/grpc v1.10.0 - )`, - }, - } - - ctx := context.Background() - - for _, tt := range tests { - t.Run(strings.ReplaceAll(tt.path, "/", "_")+"_"+tt.vers, func(t *testing.T) { - f, err := modfile.Parse("golden", []byte(tt.gomod), nil) - if err != nil { - t.Fatal(err) - } - want, err := f.Format() - if err != nil { - t.Fatal(err) - } - - dir, err := modfetch.Download(ctx, module.Version{Path: tt.path, Version: tt.vers}) - if err != nil { - t.Fatal(err) - } - - for name := range Converters { - file := filepath.Join(dir, name) - data, err := os.ReadFile(file) - if err == nil { - f := new(modfile.File) - f.AddModuleStmt(tt.path) - if err := ConvertLegacyConfig(f, filepath.ToSlash(file), data); err != nil { - t.Fatal(err) - } - out, err := f.Format() - if err != nil { - t.Fatalf("format after conversion: %v", err) - } - if !bytes.Equal(out, want) { - t.Fatalf("final go.mod:\n%s\n\nwant:\n%s", out, want) - } - return - } - } - t.Fatalf("no converter found for %s@%s", tt.path, tt.vers) - }) - } -} diff --git a/src/cmd/go/internal/modfetch/bootstrap.go b/src/cmd/go/internal/modfetch/bootstrap.go index e4020b0b41e..ed694581a7c 100644 --- a/src/cmd/go/internal/modfetch/bootstrap.go +++ b/src/cmd/go/internal/modfetch/bootstrap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build cmd_go_bootstrap // +build cmd_go_bootstrap package modfetch diff --git a/src/cmd/go/internal/modfetch/cache.go b/src/cmd/go/internal/modfetch/cache.go index 3a2ff63721c..f3b58a172a6 100644 --- a/src/cmd/go/internal/modfetch/cache.go +++ b/src/cmd/go/internal/modfetch/cache.go @@ -11,8 +11,10 @@ import ( "fmt" "io" "io/fs" + "math/rand" "os" "path/filepath" + "strconv" "strings" "sync" @@ -21,17 +23,15 @@ import ( "cmd/go/internal/lockedfile" "cmd/go/internal/modfetch/codehost" "cmd/go/internal/par" - "cmd/go/internal/renameio" + "cmd/go/internal/robustio" "golang.org/x/mod/module" "golang.org/x/mod/semver" ) func cacheDir(path string) (string, error) { - if cfg.GOMODCACHE == "" { - // modload.Init exits if GOPATH[0] is empty, and cfg.GOMODCACHE - // is set to GOPATH[0]/pkg/mod if GOMODCACHE is empty, so this should never happen. - return "", fmt.Errorf("internal error: cfg.GOMODCACHE not set") + if err := checkCacheDir(); err != nil { + return "", err } enc, err := module.EscapePath(path) if err != nil { @@ -64,10 +64,8 @@ func CachePath(m module.Version, suffix string) (string, error) { // along with the directory if the directory does not exist or if the directory // is not completely populated. func DownloadDir(m module.Version) (string, error) { - if cfg.GOMODCACHE == "" { - // modload.Init exits if GOPATH[0] is empty, and cfg.GOMODCACHE - // is set to GOPATH[0]/pkg/mod if GOMODCACHE is empty, so this should never happen. - return "", fmt.Errorf("internal error: cfg.GOMODCACHE not set") + if err := checkCacheDir(); err != nil { + return "", err } enc, err := module.EscapePath(m.Path) if err != nil { @@ -84,6 +82,7 @@ func DownloadDir(m module.Version) (string, error) { return "", err } + // Check whether the directory itself exists. dir := filepath.Join(cfg.GOMODCACHE, enc+"@"+encVer) if fi, err := os.Stat(dir); os.IsNotExist(err) { return dir, err @@ -92,6 +91,9 @@ func DownloadDir(m module.Version) (string, error) { } else if !fi.IsDir() { return dir, &DownloadDirPartialError{dir, errors.New("not a directory")} } + + // Check if a .partial file exists. This is created at the beginning of + // a download and removed after the zip is extracted. partialPath, err := CachePath(m, "partial") if err != nil { return dir, err @@ -101,6 +103,21 @@ func DownloadDir(m module.Version) (string, error) { } else if !os.IsNotExist(err) { return dir, err } + + // Check if a .ziphash file exists. It should be created before the + // zip is extracted, but if it was deleted (by another program?), we need + // to re-calculate it. Note that checkMod will repopulate the ziphash + // file if it doesn't exist, but if the module is excluded by checks + // through GONOSUMDB or GOPRIVATE, that check and repopulation won't happen. + ziphashPath, err := CachePath(m, "ziphash") + if err != nil { + return dir, err + } + if _, err := os.Stat(ziphashPath); os.IsNotExist(err) { + return dir, &DownloadDirPartialError{dir, errors.New("ziphash file is missing")} + } else if err != nil { + return dir, err + } return dir, nil } @@ -129,15 +146,13 @@ func lockVersion(mod module.Version) (unlock func(), err error) { return lockedfile.MutexAt(path).Lock() } -// SideLock locks a file within the module cache that that previously guarded +// SideLock locks a file within the module cache that previously guarded // edits to files outside the cache, such as go.sum and go.mod files in the // user's working directory. // If err is nil, the caller MUST eventually call the unlock function. 
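A runnable sketch of the unlock contract stated in the SideLock comment above. Since lockedfile is internal to cmd/go and cannot be imported directly, an in-process sync.Mutex stands in for the lock file; the shape of the API is the point:

package main

import (
    "fmt"
    "sync"
)

var mu sync.Mutex // stand-in for the lock file in the module cache

// sideLock mimics SideLock: on success it returns an unlock function
// that the caller MUST eventually call, typically via defer.
func sideLock() (unlock func(), err error) {
    mu.Lock()
    return mu.Unlock, nil
}

func main() {
    unlock, err := sideLock()
    if err != nil {
        panic(err)
    }
    defer unlock()
    fmt.Println("editing go.sum under the side lock")
}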
func SideLock() (unlock func(), err error) { - if cfg.GOMODCACHE == "" { - // modload.Init exits if GOPATH[0] is empty, and cfg.GOMODCACHE - // is set to GOPATH[0]/pkg/mod if GOMODCACHE is empty, so this should never happen. - base.Fatalf("go: internal error: cfg.GOMODCACHE not set") + if err := checkCacheDir(); err != nil { + base.Fatalf("go: %v", err) } path := filepath.Join(cfg.GOMODCACHE, "cache", "lock") @@ -315,7 +330,7 @@ func InfoFile(path, version string) (string, error) { } // Stat should have populated the disk cache for us. - file, _, err := readDiskStat(path, version) + file, err := CachePath(module.Version{Path: path, Version: version}, "info") if err != nil { return "", err } @@ -332,6 +347,9 @@ func GoMod(path, rev string) ([]byte, error) { if _, info, err := readDiskStat(path, rev); err == nil { rev = info.Version } else { + if errors.Is(err, statCacheErr) { + return nil, err + } err := TryProxies(func(proxy string) error { info, err := Lookup(proxy, path).Stat(rev) if err == nil { @@ -367,7 +385,7 @@ func GoModFile(path, version string) (string, error) { return "", err } // GoMod should have populated the disk cache for us. - file, _, err := readDiskGoMod(path, version) + file, err := CachePath(module.Version{Path: path, Version: version}, "mod") if err != nil { return "", err } @@ -482,7 +500,7 @@ func readDiskStatByHash(path, rev string) (file string, info *RevInfo, err error for _, name := range names { if strings.HasSuffix(name, suffix) { v := strings.TrimSuffix(name, ".info") - if IsPseudoVersion(v) && semver.Compare(v, maxVersion) > 0 { + if module.IsPseudoVersion(v) && semver.Compare(v, maxVersion) > 0 { maxVersion = v file, info, err = readDiskStat(path, strings.TrimSuffix(name, ".info")) } @@ -530,7 +548,7 @@ func readDiskCache(path, rev, suffix string) (file string, data []byte, err erro if err != nil { return "", nil, errNotCached } - data, err = renameio.ReadFile(file) + data, err = robustio.ReadFile(file) if err != nil { return file, nil, errNotCached } @@ -567,7 +585,29 @@ func writeDiskCache(file string, data []byte) error { return err } - if err := renameio.WriteFile(file, data, 0666); err != nil { + // Write the file to a temporary location, and then rename it to its final + // path to reduce the likelihood of a corrupt file existing at that final path. + f, err := tempFile(filepath.Dir(file), filepath.Base(file), 0666) + if err != nil { + return err + } + defer func() { + // Only call os.Remove on f.Name() if we failed to rename it: otherwise, + // some other process may have created a new file with the same name after + // the rename completed. + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + if _, err := f.Write(data); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := robustio.Rename(f.Name(), file); err != nil { return err } @@ -577,29 +617,49 @@ func writeDiskCache(file string, data []byte) error { return nil } +// tempFile creates a new temporary file with given permission bits. +func tempFile(dir, prefix string, perm fs.FileMode) (f *os.File, err error) { + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+".tmp") + f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) + if os.IsExist(err) { + continue + } + break + } + return +} + // rewriteVersionList rewrites the version list in dir // after a new *.mod file has been written. 
-func rewriteVersionList(dir string) { +func rewriteVersionList(dir string) (err error) { if filepath.Base(dir) != "@v" { base.Fatalf("go: internal error: misuse of rewriteVersionList") } listFile := filepath.Join(dir, "list") - // We use a separate lockfile here instead of locking listFile itself because - // we want to use Rename to write the file atomically. The list may be read by - // a GOPROXY HTTP server, and if we crash midway through a rewrite (or if the - // HTTP server ignores our locking and serves the file midway through a - // rewrite) it's better to serve a stale list than a truncated one. - unlock, err := lockedfile.MutexAt(listFile + ".lock").Lock() + // Lock listfile when writing to it to try to avoid corruption to the file. + // Under rare circumstances, for instance, if the system loses power in the + // middle of a write it is possible for corrupt data to be written. This is + // not a problem for the go command itself, but may be an issue if the + // cache is being served by a GOPROXY HTTP server. This will be corrected + // the next time a new version of the module is fetched and the file is rewritten. + // TODO(matloob): golang.org/issue/43313 covers adding a go mod verify + // command that removes module versions that fail checksums. It should also + // remove list files that are detected to be corrupt. + f, err := lockedfile.Edit(listFile) if err != nil { - base.Fatalf("go: can't lock version list lockfile: %v", err) + return err } - defer unlock() - + defer func() { + if cerr := f.Close(); cerr != nil && err == nil { + err = cerr + } + }() infos, err := os.ReadDir(dir) if err != nil { - return + return err } var list []string for _, info := range infos { @@ -617,19 +677,74 @@ func rewriteVersionList(dir string) { } } } - SortVersions(list) + semver.Sort(list) var buf bytes.Buffer for _, v := range list { buf.WriteString(v) buf.WriteString("\n") } - old, _ := renameio.ReadFile(listFile) - if bytes.Equal(buf.Bytes(), old) { - return + if fi, err := f.Stat(); err == nil && int(fi.Size()) == buf.Len() { + old := make([]byte, buf.Len()+1) + if n, err := f.ReadAt(old, 0); err == io.EOF && n == buf.Len() && bytes.Equal(buf.Bytes(), old[:n]) { + return nil // No edit needed. + } + } + // Remove existing contents, so that when we truncate to the actual size it will zero-fill, + // and we will be able to detect (some) incomplete writes as files containing trailing NUL bytes. + if err := f.Truncate(0); err != nil { + return err + } + // Reserve the final size and zero-fill. + if err := f.Truncate(int64(buf.Len())); err != nil { + return err + } + // Write the actual contents. If this fails partway through, + // the remainder of the file should remain as zeroes. + if _, err := f.Write(buf.Bytes()); err != nil { + f.Truncate(0) + return err } - if err := renameio.WriteFile(listFile, buf.Bytes(), 0666); err != nil { - base.Fatalf("go: failed to write version list: %v", err) - } + return nil +} + +var ( + statCacheOnce sync.Once + statCacheErr error +) + +// checkCacheDir checks if the directory specified by GOMODCACHE exists. An +// error is returned if it does not. +func checkCacheDir() error { + if cfg.GOMODCACHE == "" { + // modload.Init exits if GOPATH[0] is empty, and cfg.GOMODCACHE + // is set to GOPATH[0]/pkg/mod if GOMODCACHE is empty, so this should never happen.
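The rewriteVersionList change above leans on a truncate-then-write discipline: dropping the old contents, reserving the final size (which zero-fills), and only then writing the real bytes means a torn write leaves NUL bytes that a reader can detect. A minimal sketch of that discipline on a temporary file:

package main

import (
    "log"
    "os"
)

// writeDetectable follows the same pattern as rewriteVersionList:
// truncate to zero, reserve the final size (zero-filled), then write
// the real bytes over the zeroes.
func writeDetectable(f *os.File, data []byte) error {
    if err := f.Truncate(0); err != nil {
        return err
    }
    if err := f.Truncate(int64(len(data))); err != nil {
        return err
    }
    _, err := f.WriteAt(data, 0) // a partial write leaves trailing NULs
    return err
}

func main() {
    f, err := os.CreateTemp("", "list")
    if err != nil {
        log.Fatal(err)
    }
    defer os.Remove(f.Name())
    defer f.Close()
    if err := writeDetectable(f, []byte("v1.0.0\nv1.1.0\n")); err != nil {
        log.Fatal(err)
    }
}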
+ return fmt.Errorf("internal error: cfg.GOMODCACHE not set") + } + if !filepath.IsAbs(cfg.GOMODCACHE) { + return fmt.Errorf("GOMODCACHE entry is relative; must be absolute path: %q.\n", cfg.GOMODCACHE) + } + + // os.Stat is slow on Windows, so we only call it once to prevent unnecessary + // I/O every time this function is called. + statCacheOnce.Do(func() { + fi, err := os.Stat(cfg.GOMODCACHE) + if err != nil { + if !os.IsNotExist(err) { + statCacheErr = fmt.Errorf("could not create module cache: %w", err) + return + } + if err := os.MkdirAll(cfg.GOMODCACHE, 0777); err != nil { + statCacheErr = fmt.Errorf("could not create module cache: %w", err) + return + } + return + } + if !fi.IsDir() { + statCacheErr = fmt.Errorf("could not create module cache: %q is not a directory", cfg.GOMODCACHE) + return + } + }) + return statCacheErr } diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go index 72005e27d5e..4d4964edf44 100644 --- a/src/cmd/go/internal/modfetch/codehost/git.go +++ b/src/cmd/go/internal/modfetch/codehost/git.go @@ -296,6 +296,9 @@ func (r *gitRepo) stat(rev string) (*RevInfo, error) { // Or maybe it's the prefix of a hash of a named ref. // Try to resolve to both a ref (git name) and full (40-hex-digit) commit hash. r.refsOnce.Do(r.loadRefs) + // loadRefs may return an error if git fails, for example segfaults, or + // could not load a private repo, but defer checking to the else block + // below, in case we already have the rev in question in the local cache. var ref, hash string if r.refs["refs/tags/"+rev] != "" { ref = "refs/tags/" + rev @@ -332,6 +335,9 @@ func (r *gitRepo) stat(rev string) (*RevInfo, error) { hash = rev } } else { + if r.refsErr != nil { + return nil, r.refsErr + } return nil, &UnknownRevisionError{Rev: rev} } diff --git a/src/cmd/go/internal/modfetch/codehost/shell.go b/src/cmd/go/internal/modfetch/codehost/shell.go index ce8b501d53c..0e9f3819667 100644 --- a/src/cmd/go/internal/modfetch/codehost/shell.go +++ b/src/cmd/go/internal/modfetch/codehost/shell.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore // Interactive debugging shell for codehost.Repo implementations. 
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go index 2dcbb99b185..f817a045834 100644 --- a/src/cmd/go/internal/modfetch/coderepo.go +++ b/src/cmd/go/internal/modfetch/coderepo.go @@ -159,7 +159,7 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) { if r.codeDir != "" { v = v[len(r.codeDir)+1:] } - if v == "" || v != module.CanonicalVersion(v) || IsPseudoVersion(v) { + if v == "" || v != module.CanonicalVersion(v) || module.IsPseudoVersion(v) { continue } @@ -172,8 +172,8 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) { list = append(list, v) } - SortVersions(list) - SortVersions(incompatible) + semver.Sort(list) + semver.Sort(incompatible) return r.appendIncompatibleVersions(list, incompatible) } @@ -385,7 +385,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e if statVers != "" && statVers == module.CanonicalVersion(statVers) { info2.Version = statVers - if IsPseudoVersion(info2.Version) { + if module.IsPseudoVersion(info2.Version) { if err := r.validatePseudoVersion(info, info2.Version); err != nil { return nil, err } @@ -433,7 +433,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e } trimmed := tag[len(tagPrefix):] // Tags that look like pseudo-versions would be confusing. Ignore them. - if IsPseudoVersion(tag) { + if module.IsPseudoVersion(tag) { return "", false } @@ -531,7 +531,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e pseudoBase, _ = tagToVersion(tag) // empty if the tag is invalid } - info2.Version = PseudoVersion(r.pseudoMajor, pseudoBase, info.Time, info.Short) + info2.Version = module.PseudoVersion(r.pseudoMajor, pseudoBase, info.Time, info.Short) return checkGoMod() } @@ -560,7 +560,7 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string) return err } - rev, err := PseudoVersionRev(version) + rev, err := module.PseudoVersionRev(version) if err != nil { return err } @@ -575,12 +575,12 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string) } } - t, err := PseudoVersionTime(version) + t, err := module.PseudoVersionTime(version) if err != nil { return err } if !t.Equal(info.Time.Truncate(time.Second)) { - return fmt.Errorf("does not match version-control timestamp (expected %s)", info.Time.UTC().Format(pseudoVersionTimestampFormat)) + return fmt.Errorf("does not match version-control timestamp (expected %s)", info.Time.UTC().Format(module.PseudoVersionTimestampFormat)) } tagPrefix := "" @@ -604,7 +604,7 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string) // not enforce that property when resolving existing pseudo-versions: we don't // know when the parent tags were added, and the highest-tagged parent may not // have existed when the pseudo-version was first resolved. 
- base, err := PseudoVersionBase(strings.TrimSuffix(version, "+incompatible")) + base, err := module.PseudoVersionBase(strings.TrimSuffix(version, "+incompatible")) if err != nil { return err } @@ -661,7 +661,7 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string) if err != nil { return err } - rev, err := PseudoVersionRev(version) + rev, err := module.PseudoVersionRev(version) if err != nil { return fmt.Errorf("not a descendent of preceding tag (%s)", lastTag) } @@ -672,8 +672,8 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string) func (r *codeRepo) revToRev(rev string) string { if semver.IsValid(rev) { - if IsPseudoVersion(rev) { - r, _ := PseudoVersionRev(rev) + if module.IsPseudoVersion(rev) { + r, _ := module.PseudoVersionRev(rev) return r } if semver.Build(rev) == "+incompatible" { @@ -843,7 +843,7 @@ func (r *codeRepo) GoMod(version string) (data []byte, err error) { return nil, fmt.Errorf("version %s is not canonical", version) } - if IsPseudoVersion(version) { + if module.IsPseudoVersion(version) { // findDir ignores the metadata encoded in a pseudo-version, // only using the revision at the end. // Invoke Stat to verify the metadata explicitly so we don't return @@ -942,7 +942,7 @@ func (r *codeRepo) Zip(dst io.Writer, version string) error { return fmt.Errorf("version %s is not canonical", version) } - if IsPseudoVersion(version) { + if module.IsPseudoVersion(version) { // findDir ignores the metadata encoded in a pseudo-version, // only using the revision at the end. // Invoke Stat to verify the metadata explicitly so we don't return diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go index c55c3cf2534..e40593abae8 100644 --- a/src/cmd/go/internal/modfetch/fetch.go +++ b/src/cmd/go/internal/modfetch/fetch.go @@ -8,6 +8,8 @@ import ( "archive/zip" "bytes" "context" + "crypto/sha256" + "encoding/base64" "errors" "fmt" "io" @@ -22,7 +24,6 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/lockedfile" "cmd/go/internal/par" - "cmd/go/internal/renameio" "cmd/go/internal/robustio" "cmd/go/internal/trace" @@ -37,10 +38,8 @@ var downloadCache par.Cache // local download cache and returns the name of the directory // corresponding to the root of the module's file tree. func Download(ctx context.Context, mod module.Version) (dir string, err error) { - if cfg.GOMODCACHE == "" { - // modload.Init exits if GOPATH[0] is empty, and cfg.GOMODCACHE - // is set to GOPATH[0]/pkg/mod if GOMODCACHE is empty, so this should never happen. - base.Fatalf("go: internal error: cfg.GOMODCACHE not set") + if err := checkCacheDir(); err != nil { + base.Fatalf("go: %v", err) } // The par.Cache here avoids duplicate work. @@ -170,13 +169,16 @@ func DownloadZip(ctx context.Context, mod module.Version) (zipfile string, err e if err != nil { return cached{"", err} } + ziphashfile := zipfile + "hash" - // Skip locking if the zipfile already exists. + // Return without locking if the zip and ziphash files exist. if _, err := os.Stat(zipfile); err == nil { - return cached{zipfile, nil} + if _, err := os.Stat(ziphashfile); err == nil { + return cached{zipfile, nil} + } } - // The zip file does not exist. Acquire the lock and create it. + // The zip or ziphash file does not exist. Acquire the lock and create them. 
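DownloadZip above returns without locking when both the zip and ziphash files already exist, and downloadZip re-checks after the per-version lock is acquired. A minimal in-process sketch of that check/lock/re-check pattern, with a sync.Mutex and a stub write standing in for the real lock file and fetch:

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "sync"
)

var versionLock sync.Mutex // stand-in for the per-version lock file

func ensureZip(zipfile string) error {
    // Fast path: return without locking if the file already exists.
    if _, err := os.Stat(zipfile); err == nil {
        return nil
    }
    versionLock.Lock()
    defer versionLock.Unlock()
    // Double-check: another goroutine or process may have created the
    // file while we were waiting for the lock.
    if _, err := os.Stat(zipfile); err == nil {
        return nil
    }
    fmt.Println("downloading", zipfile) // stand-in for the real fetch
    return os.WriteFile(zipfile, []byte("zip bytes"), 0666)
}

func main() {
    dir, err := os.MkdirTemp("", "cache")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)
    if err := ensureZip(filepath.Join(dir, "mod.zip")); err != nil {
        panic(err)
    }
}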
if cfg.CmdName != "mod download" { fmt.Fprintf(os.Stderr, "go: downloading %s %s\n", mod.Path, mod.Version) } @@ -186,14 +188,6 @@ func DownloadZip(ctx context.Context, mod module.Version) (zipfile string, err e } defer unlock() - // Double-check that the zipfile was not created while we were waiting for - // the lock. - if _, err := os.Stat(zipfile); err == nil { - return cached{zipfile, nil} - } - if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { - return cached{"", err} - } if err := downloadZip(ctx, mod, zipfile); err != nil { return cached{"", err} } @@ -206,23 +200,47 @@ func downloadZip(ctx context.Context, mod module.Version, zipfile string) (err e ctx, span := trace.StartSpan(ctx, "modfetch.downloadZip "+zipfile) defer span.Done() + // Double-check that the zipfile was not created while we were waiting for + // the lock in DownloadZip. + ziphashfile := zipfile + "hash" + var zipExists, ziphashExists bool + if _, err := os.Stat(zipfile); err == nil { + zipExists = true + } + if _, err := os.Stat(ziphashfile); err == nil { + ziphashExists = true + } + if zipExists && ziphashExists { + return nil + } + + // Create parent directories. + if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { + return err + } + // Clean up any remaining tempfiles from previous runs. // This is only safe to do because the lock file ensures that their // writers are no longer active. - for _, base := range []string{zipfile, zipfile + "hash"} { - if old, err := filepath.Glob(renameio.Pattern(base)); err == nil { - for _, path := range old { - os.Remove(path) // best effort - } + tmpPattern := filepath.Base(zipfile) + "*.tmp" + if old, err := filepath.Glob(filepath.Join(filepath.Dir(zipfile), tmpPattern)); err == nil { + for _, path := range old { + os.Remove(path) // best effort } } + // If the zip file exists, the ziphash file must have been deleted + // or lost after a file system crash. Re-hash the zip without downloading. + if zipExists { + return hashZip(mod, zipfile, ziphashfile) + } + // From here to the os.Rename call below is functionally almost equivalent to // renameio.WriteToFile, with one key difference: we want to validate the // contents of the file (by hashing it) before we commit it. Because the file // is zip-compressed, we need an actual file — or at least an io.ReaderAt — to // validate it: we can't just tee the stream as we write it. - f, err := os.CreateTemp(filepath.Dir(zipfile), filepath.Base(renameio.Pattern(zipfile))) + f, err := os.CreateTemp(filepath.Dir(zipfile), tmpPattern) if err != nil { return err } @@ -278,26 +296,12 @@ func downloadZip(ctx context.Context, mod module.Version, zipfile string) (err e } } - // Sync the file before renaming it: otherwise, after a crash the reader may - // observe a 0-length file instead of the actual contents. - // See https://golang.org/issue/22397#issuecomment-380831736. - if err := f.Sync(); err != nil { - return err - } if err := f.Close(); err != nil { return err } // Hash the zip file and check the sum before renaming to the final location. 
-	hash, err := dirhash.HashZip(f.Name(), dirhash.DefaultHash)
-	if err != nil {
-		return err
-	}
-	if err := checkModSum(mod, hash); err != nil {
-		return err
-	}
-
-	if err := renameio.WriteFile(zipfile+"hash", []byte(hash), 0666); err != nil {
+	if err := hashZip(mod, f.Name(), ziphashfile); err != nil {
 		return err
 	}
 	if err := os.Rename(f.Name(), zipfile); err != nil {
@@ -309,6 +313,36 @@ func downloadZip(ctx context.Context, mod module.Version, zipfile string) (err e
 	return nil
 }
 
+// hashZip reads the zip file at zipfile, then writes the hash to ziphashfile,
+// overwriting that file if it exists.
+//
+// If the hash does not match go.sum (or the sumdb if enabled), hashZip returns
+// an error and does not write ziphashfile.
+func hashZip(mod module.Version, zipfile, ziphashfile string) error {
+	hash, err := dirhash.HashZip(zipfile, dirhash.DefaultHash)
+	if err != nil {
+		return err
+	}
+	if err := checkModSum(mod, hash); err != nil {
+		return err
+	}
+	hf, err := lockedfile.Create(ziphashfile)
+	if err != nil {
+		return err
+	}
+	if err := hf.Truncate(int64(len(hash))); err != nil {
+		return err
+	}
+	if _, err := hf.WriteAt([]byte(hash), 0); err != nil {
+		return err
+	}
+	if err := hf.Close(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // makeDirsReadOnly makes a best-effort attempt to remove write permissions for dir
 // and its transitive contents.
 func makeDirsReadOnly(dir string) {
@@ -452,25 +486,29 @@ func HaveSum(mod module.Version) bool {
 
 // checkMod checks the given module's checksum.
 func checkMod(mod module.Version) {
-	if cfg.GOMODCACHE == "" {
-		// Do not use current directory.
-		return
-	}
-
 	// Do the file I/O before acquiring the go.sum lock.
 	ziphash, err := CachePath(mod, "ziphash")
 	if err != nil {
 		base.Fatalf("verifying %v", module.VersionError(mod, err))
 	}
-	data, err := renameio.ReadFile(ziphash)
+	data, err := lockedfile.Read(ziphash)
 	if err != nil {
-		if errors.Is(err, fs.ErrNotExist) {
-			// This can happen if someone does rm -rf GOPATH/src/cache/download. So it goes.
-			return
-		}
 		base.Fatalf("verifying %v", module.VersionError(mod, err))
 	}
-	h := strings.TrimSpace(string(data))
+	data = bytes.TrimSpace(data)
+	if !isValidSum(data) {
+		// Recreate ziphash file from zip file and use that to check the mod sum.
+		zip, err := CachePath(mod, "zip")
+		if err != nil {
+			base.Fatalf("verifying %v", module.VersionError(mod, err))
+		}
+		err = hashZip(mod, zip, ziphash)
+		if err != nil {
+			base.Fatalf("verifying %v", module.VersionError(mod, err))
+		}
+		return
+	}
+	h := string(data)
 	if !strings.HasPrefix(h, "h1:") {
 		base.Fatalf("verifying %v", module.VersionError(mod, fmt.Errorf("unexpected ziphash: %q", h)))
 	}
@@ -615,11 +653,32 @@ func Sum(mod module.Version) string {
 	if err != nil {
 		return ""
 	}
-	data, err := renameio.ReadFile(ziphash)
+	data, err := lockedfile.Read(ziphash)
 	if err != nil {
 		return ""
 	}
-	return strings.TrimSpace(string(data))
+	data = bytes.TrimSpace(data)
+	if !isValidSum(data) {
+		return ""
+	}
+	return string(data)
+}
+
+// isValidSum returns true if data is the valid contents of a zip hash file.
+// Certain critical files are written to disk by first truncating
+// then writing the actual bytes, so that if the write fails
+// the corrupt file should contain at least one of the null
+// bytes written by the truncate operation.
+func isValidSum(data []byte) bool { + if bytes.IndexByte(data, '\000') >= 0 { + return false + } + + if len(data) != len("h1:")+base64.StdEncoding.EncodedLen(sha256.Size) { + return false + } + + return true } // WriteGoSum writes the go.sum file if it needs to be updated. diff --git a/src/cmd/go/internal/modfetch/insecure.go b/src/cmd/go/internal/modfetch/insecure.go deleted file mode 100644 index 012d05f29db..00000000000 --- a/src/cmd/go/internal/modfetch/insecure.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package modfetch - -import ( - "cmd/go/internal/cfg" - - "golang.org/x/mod/module" -) - -// allowInsecure reports whether we are allowed to fetch this path in an insecure manner. -func allowInsecure(path string) bool { - return cfg.Insecure || module.MatchPrefixPatterns(cfg.GOINSECURE, path) -} diff --git a/src/cmd/go/internal/modfetch/proxy.go b/src/cmd/go/internal/modfetch/proxy.go index 6c86d8d786d..31d453c8074 100644 --- a/src/cmd/go/internal/modfetch/proxy.go +++ b/src/cmd/go/internal/modfetch/proxy.go @@ -228,7 +228,7 @@ func (p *proxyRepo) versionError(version string, err error) error { Path: p.path, Err: &module.InvalidVersionError{ Version: version, - Pseudo: IsPseudoVersion(version), + Pseudo: module.IsPseudoVersion(version), Err: err, }, } @@ -276,11 +276,11 @@ func (p *proxyRepo) Versions(prefix string) ([]string, error) { var list []string for _, line := range strings.Split(string(data), "\n") { f := strings.Fields(line) - if len(f) >= 1 && semver.IsValid(f[0]) && strings.HasPrefix(f[0], prefix) && !IsPseudoVersion(f[0]) { + if len(f) >= 1 && semver.IsValid(f[0]) && strings.HasPrefix(f[0], prefix) && !module.IsPseudoVersion(f[0]) { list = append(list, f[0]) } } - SortVersions(list) + semver.Sort(list) return list, nil } @@ -307,8 +307,8 @@ func (p *proxyRepo) latest() (*RevInfo, error) { ) if len(f) >= 2 { ft, _ = time.Parse(time.RFC3339, f[1]) - } else if IsPseudoVersion(f[0]) { - ft, _ = PseudoVersionTime(f[0]) + } else if module.IsPseudoVersion(f[0]) { + ft, _ = module.PseudoVersionTime(f[0]) ftIsFromPseudo = true } else { // Repo.Latest promises that this method is only called where there are diff --git a/src/cmd/go/internal/modfetch/pseudo_test.go b/src/cmd/go/internal/modfetch/pseudo_test.go deleted file mode 100644 index 4483f8e962f..00000000000 --- a/src/cmd/go/internal/modfetch/pseudo_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
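Returning to the fetch.go hunks above: hashZip writes the ziphash file by truncating to the final length and then writing the real bytes, and isValidSum treats any leftover NUL byte (or a wrong length) as evidence of a torn write. A standalone sketch of both halves, using plain os where cmd/go uses its internal lockedfile package:

package main

import (
	"bytes"
	"fmt"
	"os"
)

// writeTruncated writes data by first truncating the file to the final
// length, then writing the real bytes. If the write is torn, the file keeps
// some of the NUL bytes produced by the truncation.
func writeTruncated(name string, data []byte) error {
	f, err := os.Create(name)
	if err != nil {
		return err
	}
	if err := f.Truncate(int64(len(data))); err != nil {
		f.Close()
		return err
	}
	if _, err := f.WriteAt(data, 0); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}

// looksComplete reports whether data could be a complete write: no NUL bytes
// and exactly the expected length, as isValidSum does for ziphash files.
func looksComplete(data []byte, wantLen int) bool {
	return bytes.IndexByte(data, 0) < 0 && len(data) == wantLen
}

func main() {
	const sum = "h1:deadbeef"
	if err := writeTruncated("sum.txt", []byte(sum)); err != nil {
		fmt.Println(err)
		return
	}
	data, _ := os.ReadFile("sum.txt")
	fmt.Println(looksComplete(data, len(sum)))
}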
- -package modfetch - -import ( - "testing" - "time" -) - -var pseudoTests = []struct { - major string - older string - version string -}{ - {"", "", "v0.0.0-20060102150405-hash"}, - {"v0", "", "v0.0.0-20060102150405-hash"}, - {"v1", "", "v1.0.0-20060102150405-hash"}, - {"v2", "", "v2.0.0-20060102150405-hash"}, - {"unused", "v0.0.0", "v0.0.1-0.20060102150405-hash"}, - {"unused", "v1.2.3", "v1.2.4-0.20060102150405-hash"}, - {"unused", "v1.2.99999999999999999", "v1.2.100000000000000000-0.20060102150405-hash"}, - {"unused", "v1.2.3-pre", "v1.2.3-pre.0.20060102150405-hash"}, - {"unused", "v1.3.0-pre", "v1.3.0-pre.0.20060102150405-hash"}, - {"unused", "v0.0.0--", "v0.0.0--.0.20060102150405-hash"}, - {"unused", "v1.0.0+metadata", "v1.0.1-0.20060102150405-hash+metadata"}, - {"unused", "v2.0.0+incompatible", "v2.0.1-0.20060102150405-hash+incompatible"}, - {"unused", "v2.3.0-pre+incompatible", "v2.3.0-pre.0.20060102150405-hash+incompatible"}, -} - -var pseudoTime = time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC) - -func TestPseudoVersion(t *testing.T) { - for _, tt := range pseudoTests { - v := PseudoVersion(tt.major, tt.older, pseudoTime, "hash") - if v != tt.version { - t.Errorf("PseudoVersion(%q, %q, ...) = %v, want %v", tt.major, tt.older, v, tt.version) - } - } -} - -func TestIsPseudoVersion(t *testing.T) { - for _, tt := range pseudoTests { - if !IsPseudoVersion(tt.version) { - t.Errorf("IsPseudoVersion(%q) = false, want true", tt.version) - } - if IsPseudoVersion(tt.older) { - t.Errorf("IsPseudoVersion(%q) = true, want false", tt.older) - } - } -} - -func TestPseudoVersionTime(t *testing.T) { - for _, tt := range pseudoTests { - tm, err := PseudoVersionTime(tt.version) - if tm != pseudoTime || err != nil { - t.Errorf("PseudoVersionTime(%q) = %v, %v, want %v, nil", tt.version, tm.Format(time.RFC3339), err, pseudoTime.Format(time.RFC3339)) - } - tm, err = PseudoVersionTime(tt.older) - if tm != (time.Time{}) || err == nil { - t.Errorf("PseudoVersionTime(%q) = %v, %v, want %v, error", tt.older, tm.Format(time.RFC3339), err, time.Time{}.Format(time.RFC3339)) - } - } -} - -func TestInvalidPseudoVersionTime(t *testing.T) { - const v = "---" - if _, err := PseudoVersionTime(v); err == nil { - t.Error("expected error, got nil instead") - } -} - -func TestPseudoVersionRev(t *testing.T) { - for _, tt := range pseudoTests { - rev, err := PseudoVersionRev(tt.version) - if rev != "hash" || err != nil { - t.Errorf("PseudoVersionRev(%q) = %q, %v, want %q, nil", tt.older, rev, err, "hash") - } - rev, err = PseudoVersionRev(tt.older) - if rev != "" || err == nil { - t.Errorf("PseudoVersionRev(%q) = %q, %v, want %q, error", tt.older, rev, err, "") - } - } -} - -func TestPseudoVersionBase(t *testing.T) { - for _, tt := range pseudoTests { - base, err := PseudoVersionBase(tt.version) - if err != nil { - t.Errorf("PseudoVersionBase(%q): %v", tt.version, err) - } else if base != tt.older { - t.Errorf("PseudoVersionBase(%q) = %q; want %q", tt.version, base, tt.older) - } - } -} - -func TestInvalidPseudoVersionBase(t *testing.T) { - for _, in := range []string{ - "v0.0.0", - "v0.0.0-", // malformed: empty prerelease - "v0.0.0-0.20060102150405-hash", // Z+1 == 0 - "v0.1.0-0.20060102150405-hash", // Z+1 == 0 - "v1.0.0-0.20060102150405-hash", // Z+1 == 0 - "v0.0.0-20060102150405-hash+incompatible", // "+incompatible without base version - "v0.0.0-20060102150405-hash+metadata", // other metadata without base version - } { - base, err := PseudoVersionBase(in) - if err == nil || base != "" { - t.Errorf(`PseudoVersionBase(%q) 
= %q, %v; want "", error`, in, base, err) - } - } -} - -func TestIncDecimal(t *testing.T) { - cases := []struct { - in, want string - }{ - {"0", "1"}, - {"1", "2"}, - {"99", "100"}, - {"100", "101"}, - {"101", "102"}, - } - - for _, tc := range cases { - got := incDecimal(tc.in) - if got != tc.want { - t.Fatalf("incDecimal(%q) = %q; want %q", tc.in, tc.want, got) - } - } -} - -func TestDecDecimal(t *testing.T) { - cases := []struct { - in, want string - }{ - {"", ""}, - {"0", ""}, - {"00", ""}, - {"1", "0"}, - {"2", "1"}, - {"99", "98"}, - {"100", "99"}, - {"101", "100"}, - } - - for _, tc := range cases { - got := decDecimal(tc.in) - if got != tc.want { - t.Fatalf("decDecimal(%q) = %q; want %q", tc.in, tc.want, got) - } - } -} diff --git a/src/cmd/go/internal/modfetch/repo.go b/src/cmd/go/internal/modfetch/repo.go index af9e24cefdc..0bffa55af6f 100644 --- a/src/cmd/go/internal/modfetch/repo.go +++ b/src/cmd/go/internal/modfetch/repo.go @@ -9,7 +9,6 @@ import ( "io" "io/fs" "os" - "sort" "strconv" "time" @@ -20,7 +19,6 @@ import ( web "cmd/go/internal/web" "golang.org/x/mod/module" - "golang.org/x/mod/semver" ) const traceRepo = false // trace all repo actions, for debugging @@ -35,7 +33,7 @@ type Repo interface { // Pseudo-versions are not included. // // Versions should be returned sorted in semver order - // (implementations can use SortVersions). + // (implementations can use semver.Sort). // // Versions returns a non-nil error only if there was a problem // fetching the list of versions: it may return an empty list @@ -171,15 +169,6 @@ type RevInfo struct { // and it can check that the path can be resolved to a target repository. // To avoid version control access except when absolutely necessary, // Lookup does not attempt to connect to the repository itself. -// -// The ImportRepoRev function is a variant of Import which is limited -// to code in a source code repository at a particular revision identifier -// (usually a commit hash or source code repository tag, not necessarily -// a module version). -// ImportRepoRev is used when converting legacy dependency requirements -// from older systems into go.mod files. Those older systems worked -// at either package or repository granularity, and most of the time they -// recorded commit hashes, not tagged versions. var lookupCache par.Cache @@ -194,7 +183,8 @@ type lookupCacheKey struct { // from its origin, and "noproxy" indicates that the patch should be fetched // directly only if GONOPROXY matches the given path. // -// For the distinguished proxy "off", Lookup always returns a non-nil error. +// For the distinguished proxy "off", Lookup always returns a Repo that returns +// a non-nil error for every method call. // // A successful return does not guarantee that the module // has any defined versions. @@ -267,7 +257,7 @@ var ( func lookupDirect(path string) (Repo, error) { security := web.SecureOnly - if allowInsecure(path) { + if module.MatchPrefixPatterns(cfg.GOINSECURE, path) { security = web.Insecure } rr, err := vcs.RepoRootForImportPath(path, vcs.PreferMod, security) @@ -299,63 +289,6 @@ func lookupCodeRepo(rr *vcs.RepoRoot) (codehost.Repo, error) { return code, nil } -// ImportRepoRev returns the module and version to use to access -// the given import path loaded from the source code repository that -// the original "go get" would have used, at the specific repository revision -// (typically a commit hash, but possibly also a source control tag). 
-func ImportRepoRev(path, rev string) (Repo, *RevInfo, error) { - if cfg.BuildMod == "vendor" || cfg.BuildMod == "readonly" { - return nil, nil, fmt.Errorf("repo version lookup disabled by -mod=%s", cfg.BuildMod) - } - - // Note: Because we are converting a code reference from a legacy - // version control system, we ignore meta tags about modules - // and use only direct source control entries (get.IgnoreMod). - security := web.SecureOnly - if allowInsecure(path) { - security = web.Insecure - } - rr, err := vcs.RepoRootForImportPath(path, vcs.IgnoreMod, security) - if err != nil { - return nil, nil, err - } - - code, err := lookupCodeRepo(rr) - if err != nil { - return nil, nil, err - } - - revInfo, err := code.Stat(rev) - if err != nil { - return nil, nil, err - } - - // TODO: Look in repo to find path, check for go.mod files. - // For now we're just assuming rr.Root is the module path, - // which is true in the absence of go.mod files. - - repo, err := newCodeRepo(code, rr.Root, rr.Root) - if err != nil { - return nil, nil, err - } - - info, err := repo.(*codeRepo).convert(revInfo, rev) - if err != nil { - return nil, nil, err - } - return repo, info, nil -} - -func SortVersions(list []string) { - sort.Slice(list, func(i, j int) bool { - cmp := semver.Compare(list[i], list[j]) - if cmp != 0 { - return cmp < 0 - } - return list[i] < list[j] - }) -} - // A loggingRepo is a wrapper around an underlying Repo // that prints a log message at the start and end of each call. // It can be inserted when debugging. diff --git a/src/cmd/go/internal/modfetch/sumdb.go b/src/cmd/go/internal/modfetch/sumdb.go index 4fbc54d15ce..f233cba6df1 100644 --- a/src/cmd/go/internal/modfetch/sumdb.go +++ b/src/cmd/go/internal/modfetch/sumdb.go @@ -4,6 +4,7 @@ // Go checksum database lookup +//go:build !cmd_go_bootstrap // +build !cmd_go_bootstrap package modfetch @@ -33,7 +34,7 @@ import ( // useSumDB reports whether to use the Go checksum database for the given module. func useSumDB(mod module.Version) bool { - return cfg.GOSUMDB != "off" && !cfg.Insecure && !module.MatchPrefixPatterns(cfg.GONOSUMDB, mod.Path) + return cfg.GOSUMDB != "off" && !module.MatchPrefixPatterns(cfg.GONOSUMDB, mod.Path) } // lookupSumDB returns the Go checksum database's go.sum lines for the given module, @@ -184,7 +185,7 @@ func (c *dbClient) initBase() { } }) if errors.Is(err, fs.ErrNotExist) { - // No proxies, or all proxies failed (with 404, 410, or were were allowed + // No proxies, or all proxies failed (with 404, 410, or were allowed // to fall back), or we reached an explicit "direct" or "off". c.base = c.direct } else if err != nil { diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 6b328d8bc82..3a24b6a2f7e 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -30,14 +30,12 @@ import ( "fmt" "os" "path/filepath" - "reflect" "runtime" "sort" "strings" "sync" "cmd/go/internal/base" - "cmd/go/internal/cfg" "cmd/go/internal/imports" "cmd/go/internal/load" "cmd/go/internal/modload" @@ -53,7 +51,7 @@ import ( var CmdGet = &base.Command{ // Note: -d -u are listed explicitly because they are the most common get flags. // Do not send CLs removing them because they're covered by [get flags]. 
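With the local SortVersions helper deleted above, callers use semver.Sort, and pseudo-version tests go through module.IsPseudoVersion; both are exported by golang.org/x/mod, as these hunks now assume. A small usage sketch of the filter-then-sort sequence that proxyRepo.Versions performs:

package main

import (
	"fmt"

	"golang.org/x/mod/module"
	"golang.org/x/mod/semver"
)

func main() {
	versions := []string{
		"v1.2.3",
		"v0.0.0-20060102150405-abcdef123456", // pseudo-version
		"v1.10.0",
		"v1.2.0",
	}

	// Keep only valid, tagged (non-pseudo) versions, as proxyRepo.Versions does.
	var list []string
	for _, v := range versions {
		if semver.IsValid(v) && !module.IsPseudoVersion(v) {
			list = append(list, v)
		}
	}
	semver.Sort(list) // semantic order: v1.2.0 < v1.2.3 < v1.10.0
	fmt.Println(list)
}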
- UsageLine: "go get [-d] [-t] [-u] [-v] [-insecure] [build flags] [packages]", + UsageLine: "go get [-d] [-t] [-u] [-v] [build flags] [packages]", Short: "add dependencies to current module and install them", Long: ` Get resolves its command-line arguments to packages at specific module versions, @@ -99,14 +97,6 @@ but changes the default to select patch releases. When the -t and -u flags are used together, get will update test dependencies as well. -The -insecure flag permits fetching from repositories and resolving -custom domains using insecure schemes such as HTTP, and also bypassess -module sum validation using the checksum database. Use with caution. -This flag is deprecated and will be removed in a future version of go. -To permit the use of insecure schemes, use the GOINSECURE environment -variable instead. To bypass module sum validation, use GOPRIVATE or -GONOSUMDB. See 'go help environment' for details. - The -d flag instructs get not to build or install packages. get will only update go.mod and download source code needed to build packages. @@ -227,13 +217,13 @@ variable for future go command invocations. } var ( - getD = CmdGet.Flag.Bool("d", false, "") - getF = CmdGet.Flag.Bool("f", false, "") - getFix = CmdGet.Flag.Bool("fix", false, "") - getM = CmdGet.Flag.Bool("m", false, "") - getT = CmdGet.Flag.Bool("t", false, "") - getU upgradeFlag - // -insecure is cfg.Insecure + getD = CmdGet.Flag.Bool("d", false, "") + getF = CmdGet.Flag.Bool("f", false, "") + getFix = CmdGet.Flag.Bool("fix", false, "") + getM = CmdGet.Flag.Bool("m", false, "") + getT = CmdGet.Flag.Bool("t", false, "") + getU upgradeFlag + getInsecure = CmdGet.Flag.Bool("insecure", false, "") // -v is cfg.BuildV ) @@ -264,7 +254,6 @@ func (v *upgradeFlag) String() string { return "" } func init() { work.AddBuildFlags(CmdGet, work.OmitModFlag) CmdGet.Run = runGet // break init loop - CmdGet.Flag.BoolVar(&cfg.Insecure, "insecure", cfg.Insecure, "") CmdGet.Flag.Var(&getU, "u", "") } @@ -284,10 +273,9 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { if *getM { base.Fatalf("go get: -m flag is no longer supported; consider -d to skip building packages") } - if cfg.Insecure { - fmt.Fprintf(os.Stderr, "go get: -insecure flag is deprecated; see 'go help get' for details\n") + if *getInsecure { + base.Fatalf("go get: -insecure flag is no longer supported; use GOINSECURE instead") } - load.ModResolveTests = *getT // Do not allow any updating of go.mod until we've applied // all the requested changes and checked that the result matches @@ -298,8 +286,6 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // 'go get' is expected to do this, unlike other commands. modload.AllowMissingModuleImports() - modload.LoadModFile(ctx) // Initializes modload.Target. - queries := parseArgs(ctx, args) r := newResolver(ctx, queries) @@ -367,7 +353,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { pkgPatterns = append(pkgPatterns, q.pattern) } } - r.checkPackagesAndRetractions(ctx, pkgPatterns) + r.checkPackageProblems(ctx, pkgPatterns) // We've already downloaded modules (and identified direct and indirect // dependencies) by loading packages in findAndUpgradeImports. @@ -380,12 +366,51 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // directory. 
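With the -insecure flag removed above, insecure fetching and checksum-database skipping are controlled purely by environment patterns, matched via module.MatchPrefixPatterns. A brief sketch of how such a comma-separated pattern list behaves; the paths and patterns here are made up for illustration:

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Comma-separated glob patterns, as accepted by GOINSECURE and GONOSUMDB.
	const goinsecure = "example.com/legacy,*.corp.example.com"

	for _, path := range []string{
		"example.com/legacy/pkg", // matches the first pattern as a path prefix
		"git.corp.example.com/x", // matches the wildcard host pattern
		"golang.org/x/mod",       // matches neither
	} {
		fmt.Println(path, module.MatchPrefixPatterns(goinsecure, path))
	}
}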
if !*getD && len(pkgPatterns) > 0 { work.BuildInit() - pkgs := load.PackagesAndErrors(ctx, pkgPatterns) + + pkgOpts := load.PackageOpts{ModResolveTests: *getT} + var pkgs []*load.Package + for _, pkg := range load.PackagesAndErrors(ctx, pkgOpts, pkgPatterns) { + if pkg.Error != nil { + var noGo *load.NoGoError + if errors.As(pkg.Error.Err, &noGo) { + if m := modload.PackageModule(pkg.ImportPath); m.Path == pkg.ImportPath { + // pkg is at the root of a module, and doesn't exist with the current + // build tags. Probably the user just wanted to change the version of + // that module — not also build the package — so suppress the error. + // (See https://golang.org/issue/33526.) + continue + } + } + } + pkgs = append(pkgs, pkg) + } load.CheckPackageErrors(pkgs) + + haveExe := false + for _, pkg := range pkgs { + if pkg.Name == "main" { + haveExe = true + break + } + } + if haveExe { + fmt.Fprint(os.Stderr, "go get: installing executables with 'go get' in module mode is deprecated.") + var altMsg string + if modload.HasModRoot() { + altMsg = ` + To adjust and download dependencies of the current module, use 'go get -d'. + To install using requirements of the current module, use 'go install'. + To install ignoring the current module, use 'go install' with a version, + like 'go install example.com/cmd@latest'. +` + } else { + altMsg = "\n\tUse 'go install pkg@version' instead.\n" + } + fmt.Fprint(os.Stderr, altMsg) + fmt.Fprintf(os.Stderr, "\tFor more information, see https://golang.org/doc/go-get-install-deprecation\n\tor run 'go help get' or 'go help install'.\n") + } + work.InstallPackages(ctx, pkgPatterns, pkgs) - // TODO(#40276): After Go 1.16, print a deprecation notice when building and - // installing main packages. 'go install pkg' or 'go install pkg@version' - // should be used instead. Give the specific argument to use if possible. } if !modload.HasModRoot() { @@ -396,7 +421,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { oldReqs := reqsFromGoMod(modload.ModFile()) modload.AllowWriteGoMod() - modload.WriteGoMod() + modload.WriteGoMod(ctx) modload.DisallowWriteGoMod() newReqs := reqsFromGoMod(modload.ModFile()) @@ -478,7 +503,11 @@ type versionReason struct { } func newResolver(ctx context.Context, queries []*query) *resolver { - buildList := modload.LoadAllModules(ctx) + // LoadModGraph also sets modload.Target, which is needed by various resolver + // methods. + mg := modload.LoadModGraph(ctx) + + buildList := mg.BuildList() initialVersion := make(map[string]string, len(buildList)) for _, m := range buildList { initialVersion[m.Path] = m.Version @@ -687,7 +716,7 @@ func (r *resolver) performLocalQueries(ctx context.Context) { // Absolute paths like C:\foo and relative paths like ../foo... are // restricted to matching packages in the main module. - pkgPattern := modload.DirImportPath(q.pattern) + pkgPattern := modload.DirImportPath(ctx, q.pattern) if pkgPattern == "." { return errSet(fmt.Errorf("%s%s is not within module rooted at %s", q.pattern, absDetail, modload.ModRoot())) } @@ -1120,9 +1149,10 @@ func (r *resolver) findAndUpgradeImports(ctx context.Context, queries []*query) // build list. func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPackage func(ctx context.Context, path string, m module.Version) (versionOk bool)) { opts := modload.PackageOpts{ - Tags: imports.AnyTags(), - LoadTests: *getT, - SilenceErrors: true, // May be fixed by subsequent upgrades or downgrades. 
+ Tags: imports.AnyTags(), + VendorModulesInGOROOTSrc: true, + LoadTests: *getT, + SilencePackageErrors: true, // May be fixed by subsequent upgrades or downgrades. } opts.AllowPackage = func(ctx context.Context, path string, m module.Version) error { @@ -1433,25 +1463,31 @@ func (r *resolver) chooseArbitrarily(cs pathSet) (isPackage bool, m module.Versi return false, cs.mod } -// checkPackagesAndRetractions reloads packages for the given patterns and -// reports missing and ambiguous package errors. It also reports loads and -// reports retractions for resolved modules and modules needed to build -// named packages. +// checkPackageProblems reloads packages for the given patterns and reports +// missing and ambiguous package errors. It also reports retractions and +// deprecations for resolved modules and modules needed to build named packages. // // We skip missing-package errors earlier in the process, since we want to // resolve pathSets ourselves, but at that point, we don't have enough context // to log the package-import chains leading to each error. -func (r *resolver) checkPackagesAndRetractions(ctx context.Context, pkgPatterns []string) { +func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []string) { defer base.ExitIfErrors() - // Build a list of modules to load retractions for. Start with versions - // selected based on command line queries. - // - // This is a subset of the build list. If the main module has a lot of - // dependencies, loading retractions for the entire build list would be slow. - relevantMods := make(map[module.Version]struct{}) + // Gather information about modules we might want to load retractions and + // deprecations for. Loading this metadata requires at least one version + // lookup per module, and we don't want to load information that's neither + // relevant nor actionable. + type modFlags int + const ( + resolved modFlags = 1 << iota // version resolved by 'go get' + named // explicitly named on command line or provides a named package + hasPkg // needed to build named packages + direct // provides a direct dependency of the main module + ) + relevantMods := make(map[module.Version]modFlags) for path, reason := range r.resolvedVersion { - relevantMods[module.Version{Path: path, Version: reason.version}] = struct{}{} + m := module.Version{Path: path, Version: reason.version} + relevantMods[m] |= resolved } // Reload packages, reporting errors for missing and ambiguous imports. @@ -1459,9 +1495,11 @@ func (r *resolver) checkPackagesAndRetractions(ctx context.Context, pkgPatterns // LoadPackages will print errors (since it has more context) but will not // exit, since we need to load retractions later. pkgOpts := modload.PackageOpts{ - LoadTests: *getT, - ResolveMissingImports: false, - AllowErrors: true, + VendorModulesInGOROOTSrc: true, + LoadTests: *getT, + ResolveMissingImports: false, + AllowErrors: true, + SilenceNoGoErrors: true, } matches, pkgs := modload.LoadPackages(ctx, pkgOpts, pkgPatterns...) for _, m := range matches { @@ -1477,60 +1515,105 @@ func (r *resolver) checkPackagesAndRetractions(ctx context.Context, pkgPatterns // associated with either the package or its test — ErrNoGo must // indicate that none of those source files happen to apply in this // configuration. If we are actually building the package (no -d - // flag), the compiler will report the problem; otherwise, assume that - // the user is going to build or test it in some other configuration - // and suppress the error. 
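The modFlags bitset introduced in this hunk records, per module, every reason the module is interesting; masks then pick out which modules deserve retraction or deprecation lookups. A minimal sketch of that accumulate-then-mask idiom, with hypothetical module paths:

package main

import "fmt"

type modFlags int

const (
	resolved modFlags = 1 << iota
	named
	hasPkg
	direct
)

func main() {
	mods := map[string]modFlags{}

	// Accumulate reasons as they are discovered, as checkPackageProblems does.
	mods["example.com/a"] |= resolved
	mods["example.com/b"] |= hasPkg
	mods["example.com/b"] |= direct

	for path, flags := range mods {
		// Deprecation lookups: explicitly resolved or named modules, or modules
		// that both provide a needed package and are direct dependencies.
		if flags&(resolved|named) != 0 || flags&(hasPkg|direct) == hasPkg|direct {
			fmt.Println("would check deprecation for", path)
		}
	}
}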
+ // flag), we will report the problem then; otherwise, assume that the + // user is going to build or test this package in some other + // configuration and suppress the error. continue } base.SetExitStatus(1) if ambiguousErr := (*modload.AmbiguousImportError)(nil); errors.As(err, &ambiguousErr) { for _, m := range ambiguousErr.Modules { - relevantMods[m] = struct{}{} + relevantMods[m] |= hasPkg } } } if m := modload.PackageModule(pkg); m.Path != "" { - relevantMods[m] = struct{}{} + relevantMods[m] |= hasPkg + } + } + for _, match := range matches { + for _, pkg := range match.Pkgs { + m := modload.PackageModule(pkg) + relevantMods[m] |= named } } } - // Load and report retractions. - type retraction struct { - m module.Version - err error - } - retractions := make([]retraction, 0, len(relevantMods)) + reqs := modload.LoadModFile(ctx) for m := range relevantMods { - retractions = append(retractions, retraction{m: m}) + if reqs.IsDirect(m.Path) { + relevantMods[m] |= direct + } } - sort.Slice(retractions, func(i, j int) bool { - return retractions[i].m.Path < retractions[j].m.Path - }) - for i := 0; i < len(retractions); i++ { + + // Load retractions for modules mentioned on the command line and modules + // needed to build named packages. We care about retractions of indirect + // dependencies, since we might be able to upgrade away from them. + type modMessage struct { + m module.Version + message string + } + retractions := make([]modMessage, 0, len(relevantMods)) + for m, flags := range relevantMods { + if flags&(resolved|named|hasPkg) != 0 { + retractions = append(retractions, modMessage{m: m}) + } + } + sort.Slice(retractions, func(i, j int) bool { return retractions[i].m.Path < retractions[j].m.Path }) + for i := range retractions { i := i r.work.Add(func() { err := modload.CheckRetractions(ctx, retractions[i].m) if retractErr := (*modload.ModuleRetractedError)(nil); errors.As(err, &retractErr) { - retractions[i].err = err + retractions[i].message = err.Error() } }) } + + // Load deprecations for modules mentioned on the command line. Only load + // deprecations for indirect dependencies if they're also direct dependencies + // of the main module. Deprecations of purely indirect dependencies are + // not actionable. + deprecations := make([]modMessage, 0, len(relevantMods)) + for m, flags := range relevantMods { + if flags&(resolved|named) != 0 || flags&(hasPkg|direct) == hasPkg|direct { + deprecations = append(deprecations, modMessage{m: m}) + } + } + sort.Slice(deprecations, func(i, j int) bool { return deprecations[i].m.Path < deprecations[j].m.Path }) + for i := range deprecations { + i := i + r.work.Add(func() { + deprecation, err := modload.CheckDeprecation(ctx, deprecations[i].m) + if err != nil || deprecation == "" { + return + } + deprecations[i].message = modload.ShortMessage(deprecation, "") + }) + } + <-r.work.Idle() + + // Report deprecations, then retractions. 
+ for _, mm := range deprecations { + if mm.message != "" { + fmt.Fprintf(os.Stderr, "go: warning: module %s is deprecated: %s\n", mm.m.Path, mm.message) + } + } var retractPath string - for _, r := range retractions { - if r.err != nil { - fmt.Fprintf(os.Stderr, "go: warning: %v\n", r.err) + for _, mm := range retractions { + if mm.message != "" { + fmt.Fprintf(os.Stderr, "go: warning: %v\n", mm.message) if retractPath == "" { - retractPath = r.m.Path + retractPath = mm.m.Path } else { retractPath = "" } } } if retractPath != "" { - fmt.Fprintf(os.Stderr, "go: to switch to the latest unretracted version, run:\n\tgo get %s@latest", retractPath) + fmt.Fprintf(os.Stderr, "go: to switch to the latest unretracted version, run:\n\tgo get %s@latest\n", retractPath) } } @@ -1643,7 +1726,8 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi } } - if err := modload.EditBuildList(ctx, additions, resolved); err != nil { + changed, err := modload.EditBuildList(ctx, additions, resolved) + if err != nil { var constraint *modload.ConstraintError if !errors.As(err, &constraint) { base.Errorf("go get: %v", err) @@ -1662,12 +1746,11 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi } return false } - - buildList := modload.LoadAllModules(ctx) - if reflect.DeepEqual(r.buildList, buildList) { + if !changed { return false } - r.buildList = buildList + + r.buildList = modload.LoadModGraph(ctx).BuildList() r.buildListVersion = make(map[string]string, len(r.buildList)) for _, m := range r.buildList { r.buildListVersion[m.Path] = m.Version diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go index d8364c8c0d3..1a5a60f7eb9 100644 --- a/src/cmd/go/internal/modget/query.go +++ b/src/cmd/go/internal/modget/query.go @@ -186,7 +186,7 @@ func (q *query) validate() error { if q.pattern == "all" { // If there is no main module, "all" is not meaningful. if !modload.HasModRoot() { - return fmt.Errorf(`cannot match "all": working directory is not part of a module`) + return fmt.Errorf(`cannot match "all": %v`, modload.ErrNoModRoot) } if !versionOkForMainModule(q.version) { // TODO(bcmills): "all@none" seems like a totally reasonable way to diff --git a/src/cmd/go/internal/modinfo/info.go b/src/cmd/go/internal/modinfo/info.go index 897be56397d..19088352f05 100644 --- a/src/cmd/go/internal/modinfo/info.go +++ b/src/cmd/go/internal/modinfo/info.go @@ -10,19 +10,20 @@ import "time" // and the fields are documented in the help text in ../list/list.go type ModulePublic struct { - Path string `json:",omitempty"` // module path - Version string `json:",omitempty"` // module version - Versions []string `json:",omitempty"` // available module versions - Replace *ModulePublic `json:",omitempty"` // replaced by this module - Time *time.Time `json:",omitempty"` // time version was created - Update *ModulePublic `json:",omitempty"` // available update (with -u) - Main bool `json:",omitempty"` // is this the main module? 
- Indirect bool `json:",omitempty"` // module is only indirectly needed by main module - Dir string `json:",omitempty"` // directory holding local copy of files, if any - GoMod string `json:",omitempty"` // path to go.mod file describing module, if any - GoVersion string `json:",omitempty"` // go version used in module - Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u) - Error *ModuleError `json:",omitempty"` // error loading module + Path string `json:",omitempty"` // module path + Version string `json:",omitempty"` // module version + Versions []string `json:",omitempty"` // available module versions + Replace *ModulePublic `json:",omitempty"` // replaced by this module + Time *time.Time `json:",omitempty"` // time version was created + Update *ModulePublic `json:",omitempty"` // available update (with -u) + Main bool `json:",omitempty"` // is this the main module? + Indirect bool `json:",omitempty"` // module is only indirectly needed by main module + Dir string `json:",omitempty"` // directory holding local copy of files, if any + GoMod string `json:",omitempty"` // path to go.mod file describing module, if any + GoVersion string `json:",omitempty"` // go version used in module + Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u) + Deprecated string `json:",omitempty"` // deprecation message, if any (with -u) + Error *ModuleError `json:",omitempty"` // error loading module } type ModuleError struct { @@ -45,6 +46,9 @@ func (m *ModulePublic) String() string { s += " [" + versionString(m.Update) + "]" } } + if m.Deprecated != "" { + s += " (deprecated)" + } if m.Replace != nil { s += " => " + m.Replace.Path if m.Replace.Version != "" { @@ -53,6 +57,9 @@ func (m *ModulePublic) String() string { s += " [" + versionString(m.Replace.Update) + "]" } } + if m.Replace.Deprecated != "" { + s += " (deprecated)" + } } return s } diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index 8ad5f834def..76e1ad589f4 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "internal/goroot" + "io/fs" "os" "path/filepath" "strings" @@ -50,17 +51,17 @@ func findStandardImportPath(path string) string { // a given package. If modules are not enabled or if the package is in the // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, nil is returned. 
-func PackageModuleInfo(pkgpath string) *modinfo.ModulePublic { +func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePublic { if isStandardImportPath(pkgpath) || !Enabled() { return nil } - m, ok := findModule(pkgpath) + m, ok := findModule(loaded, pkgpath) if !ok { return nil } - fromBuildList := true - listRetracted := false - return moduleInfo(context.TODO(), m, fromBuildList, listRetracted) + + rs := LoadModFile(ctx) + return moduleInfo(ctx, rs, m, 0) } func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { @@ -68,26 +69,38 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { return nil } - listRetracted := false if i := strings.Index(path, "@"); i >= 0 { m := module.Version{Path: path[:i], Version: path[i+1:]} - fromBuildList := false - return moduleInfo(ctx, m, fromBuildList, listRetracted) + return moduleInfo(ctx, nil, m, 0) } - for _, m := range buildList { - if m.Path == path { - fromBuildList := true - return moduleInfo(ctx, m, fromBuildList, listRetracted) + rs := LoadModFile(ctx) + + var ( + v string + ok bool + ) + if rs.depth == lazy { + v, ok = rs.rootSelected(path) + } + if !ok { + mg, err := rs.Graph(ctx) + if err != nil { + base.Fatalf("go: %v", err) + } + v = mg.Selected(path) + } + + if v == "none" { + return &modinfo.ModulePublic{ + Path: path, + Error: &modinfo.ModuleError{ + Err: "module not in current build", + }, } } - return &modinfo.ModulePublic{ - Path: path, - Error: &modinfo.ModuleError{ - Err: "module not in current build", - }, - } + return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0) } // addUpdate fills in m.Update if an updated version is available. @@ -96,7 +109,26 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { return } - if info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed); err == nil && semver.Compare(info.Version, m.Version) > 0 { + info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed) + var noVersionErr *NoMatchingVersionError + if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { + // Ignore "not found" and "no matching version" errors. + // This means the proxy has no matching version or no versions at all. + // + // We should report other errors though. An attacker that controls the + // network shouldn't be able to hide versions by interfering with + // the HTTPS connection. An attacker that controls the proxy may still + // hide versions, since the "list" and "latest" endpoints are not + // authenticated. + return + } else if err != nil { + if m.Error == nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } + return + } + + if semver.Compare(info.Version, m.Version) > 0 { m.Update = &modinfo.ModulePublic{ Path: m.Path, Version: info.Version, @@ -113,7 +145,11 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo if listRetracted { allowed = CheckExclusions } - m.Versions, _ = versions(ctx, m.Path, allowed) + var err error + m.Versions, err = versions(ctx, m.Path, allowed) + if err != nil && m.Error == nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } } // addRetraction fills in m.Retracted if the module was retracted by its author. 
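addUpdate above separates "version genuinely absent" (fs.ErrNotExist, *NoMatchingVersionError), which is silently ignored, from every other failure, which is surfaced so that a hostile proxy or network cannot quietly hide versions. A condensed sketch of that errors.Is/errors.As classification; noMatchingVersionError is a local stand-in for modload's type:

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

// noMatchingVersionError stands in for modload.NoMatchingVersionError.
type noMatchingVersionError struct{ query string }

func (e *noMatchingVersionError) Error() string {
	return fmt.Sprintf("no matching versions for query %q", e.query)
}

// classify mirrors the addUpdate logic: "not found"-style errors are ignored,
// anything else (a broken proxy, a network failure) must be reported.
func classify(err error) string {
	var noVersion *noMatchingVersionError
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersion):
		return "ignore" // proxy has no such version; nothing to report
	default:
		return "report" // silently dropping this could hide versions
	}
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify(fmt.Errorf("fetch: %w", fs.ErrNotExist)))
	fmt.Println(classify(&noMatchingVersionError{query: "upgrade"}))
	fmt.Println(classify(errors.New("proxy returned 500")))
}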
@@ -124,31 +160,72 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { } err := CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version}) - var rerr *ModuleRetractedError - if errors.As(err, &rerr) { - if len(rerr.Rationale) == 0 { + var noVersionErr *NoMatchingVersionError + var retractErr *ModuleRetractedError + if err == nil || errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { + // Ignore "not found" and "no matching version" errors. + // This means the proxy has no matching version or no versions at all. + // + // We should report other errors though. An attacker that controls the + // network shouldn't be able to hide versions by interfering with + // the HTTPS connection. An attacker that controls the proxy may still + // hide versions, since the "list" and "latest" endpoints are not + // authenticated. + return + } else if errors.As(err, &retractErr) { + if len(retractErr.Rationale) == 0 { m.Retracted = []string{"retracted by module author"} } else { - m.Retracted = rerr.Rationale + m.Retracted = retractErr.Rationale } - } else if err != nil && m.Error == nil { + } else if m.Error == nil { m.Error = &modinfo.ModuleError{Err: err.Error()} } } -func moduleInfo(ctx context.Context, m module.Version, fromBuildList, listRetracted bool) *modinfo.ModulePublic { +// addDeprecation fills in m.Deprecated if the module was deprecated by its +// author. m.Error is set if there's an error loading deprecation information. +func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { + deprecation, err := CheckDeprecation(ctx, module.Version{Path: m.Path, Version: m.Version}) + var noVersionErr *NoMatchingVersionError + if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { + // Ignore "not found" and "no matching version" errors. + // This means the proxy has no matching version or no versions at all. + // + // We should report other errors though. An attacker that controls the + // network shouldn't be able to hide versions by interfering with + // the HTTPS connection. An attacker that controls the proxy may still + // hide versions, since the "list" and "latest" endpoints are not + // authenticated. + return + } + if err != nil { + if m.Error == nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } + return + } + m.Deprecated = deprecation +} + +// moduleInfo returns information about module m, loaded from the requirements +// in rs (which may be nil to indicate that m was not loaded from a requirement +// graph). 
+func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode) *modinfo.ModulePublic { if m == Target { info := &modinfo.ModulePublic{ Path: m.Path, Version: m.Version, Main: true, } + if v, ok := rawGoVersion.Load(Target); ok { + info.GoVersion = v.(string) + } else { + panic("internal error: GoVersion not set for main module") + } if HasModRoot() { info.Dir = ModRoot() info.GoMod = ModFilePath() - if modFile.Go != nil { - info.GoVersion = modFile.Go.Version - } } return info } @@ -156,7 +233,7 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList, listRetrac info := &modinfo.ModulePublic{ Path: m.Path, Version: m.Version, - Indirect: fromBuildList && loaded != nil && !loaded.direct[m.Path], + Indirect: rs != nil && !rs.direct[m.Path], } if v, ok := rawGoVersion.Load(m); ok { info.GoVersion = v.(string) @@ -164,7 +241,10 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList, listRetrac // completeFromModCache fills in the extra fields in m using the module cache. completeFromModCache := func(m *modinfo.ModulePublic) { - mod := module.Version{Path: m.Path, Version: m.Version} + checksumOk := func(suffix string) bool { + return rs == nil || m.Version == "" || cfg.BuildMod == "mod" || + modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix}) + } if m.Version != "" { if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil { @@ -173,31 +253,40 @@ func moduleInfo(ctx context.Context, m module.Version, fromBuildList, listRetrac m.Version = q.Version m.Time = &q.Time } + } + mod := module.Version{Path: m.Path, Version: m.Version} - gomod, err := modfetch.CachePath(mod, "mod") - if err == nil { - if info, err := os.Stat(gomod); err == nil && info.Mode().IsRegular() { - m.GoMod = gomod - } - } - dir, err := modfetch.DownloadDir(mod) - if err == nil { - m.Dir = dir - } - - if listRetracted { - addRetraction(ctx, m) + if m.GoVersion == "" && checksumOk("/go.mod") { + // Load the go.mod file to determine the Go version, since it hasn't + // already been populated from rawGoVersion. + if summary, err := rawGoModSummary(mod); err == nil && summary.goVersion != "" { + m.GoVersion = summary.goVersion } } - if m.GoVersion == "" { - if summary, err := rawGoModSummary(mod); err == nil && summary.goVersionV != "" { - m.GoVersion = summary.goVersionV[1:] + if m.Version != "" { + if checksumOk("/go.mod") { + gomod, err := modfetch.CachePath(mod, "mod") + if err == nil { + if info, err := os.Stat(gomod); err == nil && info.Mode().IsRegular() { + m.GoMod = gomod + } + } + } + if checksumOk("") { + dir, err := modfetch.DownloadDir(mod) + if err == nil { + m.Dir = dir + } + } + + if mode&ListRetracted != 0 { + addRetraction(ctx, m) } } } - if !fromBuildList { + if rs == nil { // If this was an explicitly-versioned argument to 'go mod download' or // 'go list -m', report the actual requested version, not its replacement. completeFromModCache(info) // Will set m.Error in vendor mode. @@ -255,11 +344,11 @@ func PackageBuildInfo(path string, deps []string) string { return "" } - target := mustFindModule(path, path) + target := mustFindModule(loaded, path, path) mdeps := make(map[module.Version]bool) for _, dep := range deps { if !isStandardImportPath(dep) { - mdeps[mustFindModule(path, dep)] = true + mdeps[mustFindModule(loaded, path, dep)] = true } } var mods []module.Version @@ -298,8 +387,8 @@ func PackageBuildInfo(path string, deps []string) string { // // TODO(jayconrod): remove this. 
Callers should use findModule and return // errors instead of relying on base.Fatalf. -func mustFindModule(target, path string) module.Version { - pkg, ok := loaded.pkgCache.Get(path).(*loadPkg) +func mustFindModule(ld *loader, target, path string) module.Version { + pkg, ok := ld.pkgCache.Get(path).(*loadPkg) if ok { if pkg.err != nil { base.Fatalf("build %v: cannot load %v: %v", target, path, pkg.err) @@ -318,8 +407,8 @@ func mustFindModule(target, path string) module.Version { // findModule searches for the module that contains the package at path. // If the package was loaded, its containing module and true are returned. // Otherwise, module.Version{} and false are returend. -func findModule(path string) (module.Version, bool) { - if pkg, ok := loaded.pkgCache.Get(path).(*loadPkg); ok { +func findModule(ld *loader, path string) (module.Version, bool) { + if pkg, ok := ld.pkgCache.Get(path).(*loadPkg); ok { return pkg.mod, pkg.mod != module.Version{} } if path == "command-line-arguments" { diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index 45f220a6ee6..7a0cea405e3 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -7,66 +7,454 @@ package modload import ( "cmd/go/internal/base" "cmd/go/internal/cfg" - "cmd/go/internal/imports" "cmd/go/internal/mvs" + "cmd/go/internal/par" "context" "fmt" "os" + "reflect" + "runtime" + "runtime/debug" "strings" + "sync" + "sync/atomic" "golang.org/x/mod/module" + "golang.org/x/mod/semver" ) -// buildList is the list of modules to use for building packages. -// It is initialized by calling LoadPackages or ImportFromFiles, -// each of which uses loaded.load. -// -// Ideally, exactly ONE of those functions would be called, -// and exactly once. Most of the time, that's true. -// During "go get" it may not be. TODO(rsc): Figure out if -// that restriction can be established, or else document why not. -// -var buildList []module.Version - -// additionalExplicitRequirements is a list of modules paths for which -// WriteGoMod should record explicit requirements, even if they would be -// selected without those requirements. Each path must also appear in buildList. -var additionalExplicitRequirements []string - // capVersionSlice returns s with its cap reduced to its length. func capVersionSlice(s []module.Version) []module.Version { return s[:len(s):len(s)] } -// LoadAllModules loads and returns the list of modules matching the "all" -// module pattern, starting with the Target module and in a deterministic -// (stable) order, without loading any packages. -// -// Modules are loaded automatically (and lazily) in LoadPackages: -// LoadAllModules need only be called if LoadPackages is not, -// typically in commands that care about modules but no particular package. -// -// The caller must not modify the returned list, but may append to it. -func LoadAllModules(ctx context.Context) []module.Version { - LoadModFile(ctx) - ReloadBuildList() - WriteGoMod() - return capVersionSlice(buildList) +// A Requirements represents a logically-immutable set of root module requirements. +type Requirements struct { + // depth is the depth at which the requirement graph is computed. + // + // If eager, the graph includes all transitive requirements regardless of depth. + // + // If lazy, the graph includes only the root modules, the explicit + // requirements of those root modules, and the transitive requirements of only + // the *non-lazy* root modules. 
+ depth modDepth + + // rootModules is the set of module versions explicitly required by the main + // module, sorted and capped to length. It may contain duplicates, and may + // contain multiple versions for a given module path. + rootModules []module.Version + maxRootVersion map[string]string + + // direct is the set of module paths for which we believe the module provides + // a package directly imported by a package or test in the main module. + // + // The "direct" map controls which modules are annotated with "// indirect" + // comments in the go.mod file, and may impact which modules are listed as + // explicit roots (vs. indirect-only dependencies). However, it should not + // have a semantic effect on the build list overall. + // + // The initial direct map is populated from the existing "// indirect" + // comments (or lack thereof) in the go.mod file. It is updated by the + // package loader: dependencies may be promoted to direct if new + // direct imports are observed, and may be demoted to indirect during + // 'go mod tidy' or 'go mod vendor'. + // + // The direct map is keyed by module paths, not module versions. When a + // module's selected version changes, we assume that it remains direct if the + // previous version was a direct dependency. That assumption might not hold in + // rare cases (such as if a dependency splits out a nested module, or merges a + // nested module back into a parent module). + direct map[string]bool + + graphOnce sync.Once // guards writes to (but not reads from) graph + graph atomic.Value // cachedGraph } -// Selected returns the selected version of the module with the given path, or -// the empty string if the given module has no selected version -// (either because it is not required or because it is the Target module). -func Selected(path string) (version string) { - if path == Target.Path { - return "" - } - for _, m := range buildList { - if m.Path == path { - return m.Version +// A cachedGraph is a non-nil *ModuleGraph, together with any error discovered +// while loading that graph. +type cachedGraph struct { + mg *ModuleGraph + err error // If err is non-nil, mg may be incomplete (but must still be non-nil). +} + +// requirements is the requirement graph for the main module. +// +// It is always non-nil if the main module's go.mod file has been loaded. +// +// This variable should only be read from the loadModFile function, and should +// only be written in the loadModFile and commitRequirements functions. +// All other functions that need or produce a *Requirements should +// accept and/or return an explicit parameter. +var requirements *Requirements + +// newRequirements returns a new requirement set with the given root modules. +// The dependencies of the roots will be loaded lazily at the first call to the +// Graph method. +// +// The rootModules slice must be sorted according to module.Sort. +// The caller must not modify the rootModules slice or direct map after passing +// them to newRequirements. +// +// If vendoring is in effect, the caller must invoke initVendor on the returned +// *Requirements before any other method. 
+func newRequirements(depth modDepth, rootModules []module.Version, direct map[string]bool) *Requirements { + for i, m := range rootModules { + if m == Target { + panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is Target", i)) + } + if m.Path == "" || m.Version == "" { + panic(fmt.Sprintf("bad requirement: rootModules[%v] = %v", i, m)) + } + if i > 0 { + prev := rootModules[i-1] + if prev.Path > m.Path || (prev.Path == m.Path && semver.Compare(prev.Version, m.Version) > 0) { + panic(fmt.Sprintf("newRequirements called with unsorted roots: %v", rootModules)) + } } } - return "" + + rs := &Requirements{ + depth: depth, + rootModules: capVersionSlice(rootModules), + maxRootVersion: make(map[string]string, len(rootModules)), + direct: direct, + } + + for _, m := range rootModules { + if v, ok := rs.maxRootVersion[m.Path]; ok && cmpVersion(v, m.Version) >= 0 { + continue + } + rs.maxRootVersion[m.Path] = m.Version + } + return rs +} + +// initVendor initializes rs.graph from the given list of vendored module +// dependencies, overriding the graph that would normally be loaded from module +// requirements. +func (rs *Requirements) initVendor(vendorList []module.Version) { + rs.graphOnce.Do(func() { + mg := &ModuleGraph{ + g: mvs.NewGraph(cmpVersion, []module.Version{Target}), + } + + if rs.depth == lazy { + // The roots of a lazy module should already include every module in the + // vendor list, because the vendored modules are the same as those + // maintained as roots by the lazy loading “import invariant”. + // + // Just to be sure, we'll double-check that here. + inconsistent := false + for _, m := range vendorList { + if v, ok := rs.rootSelected(m.Path); !ok || v != m.Version { + base.Errorf("go: vendored module %v should be required explicitly in go.mod", m) + inconsistent = true + } + } + if inconsistent { + base.Fatalf("go: %v", errGoModDirty) + } + + // Now we can treat the rest of the module graph as effectively “pruned + // out”, like a more aggressive version of lazy loading: in vendor mode, + // the root requirements *are* the complete module graph. + mg.g.Require(Target, rs.rootModules) + } else { + // The transitive requirements of the main module are not in general available + // from the vendor directory, and we don't actually know how we got from + // the roots to the final build list. + // + // Instead, we'll inject a fake "vendor/modules.txt" module that provides + // those transitive dependencies, and mark it as a dependency of the main + // module. That allows us to elide the actual structure of the module + // graph, but still distinguishes between direct and indirect + // dependencies. + vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""} + mg.g.Require(Target, append(rs.rootModules, vendorMod)) + mg.g.Require(vendorMod, vendorList) + } + + rs.graph.Store(cachedGraph{mg, nil}) + }) +} + +// rootSelected returns the version of the root dependency with the given module +// path, or the zero module.Version and ok=false if the module is not a root +// dependency. +func (rs *Requirements) rootSelected(path string) (version string, ok bool) { + if path == Target.Path { + return Target.Version, true + } + if v, ok := rs.maxRootVersion[path]; ok { + return v, true + } + return "", false +} + +// Graph returns the graph of module requirements loaded from the current +// root modules (as reported by RootModules). 
+//
+// Graph always makes a best effort to load the requirement graph despite any
+// errors, and always returns a non-nil *ModuleGraph.
+//
+// If the requirements of any relevant module fail to load, Graph also
+// returns a non-nil error of type *mvs.BuildListError.
+func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) {
+	rs.graphOnce.Do(func() {
+		mg, mgErr := readModGraph(ctx, rs.depth, rs.rootModules)
+		rs.graph.Store(cachedGraph{mg, mgErr})
+	})
+	cached := rs.graph.Load().(cachedGraph)
+	return cached.mg, cached.err
+}
+
+// IsDirect returns whether the given module provides a package directly
+// imported by a package or test in the main module.
+func (rs *Requirements) IsDirect(path string) bool {
+	return rs.direct[path]
+}
+
+// A ModuleGraph represents the complete graph of module dependencies
+// of a main module.
+//
+// If the main module is lazily loaded, the graph does not include
+// transitive dependencies of non-root (implicit) dependencies.
+type ModuleGraph struct {
+	g         *mvs.Graph
+	loadCache par.Cache // module.Version → summaryError
+
+	buildListOnce sync.Once
+	buildList     []module.Version
+}
+
+// A summaryError is either a non-nil modFileSummary or a non-nil error
+// encountered while reading or parsing that summary.
+type summaryError struct {
+	summary *modFileSummary
+	err     error
+}
+
+var readModGraphDebugOnce sync.Once
+
+// readModGraph reads and returns the module dependency graph starting at the
+// given roots.
+//
+// Unlike LoadModGraph, readModGraph does not attempt to diagnose or update
+// inconsistent roots.
+func readModGraph(ctx context.Context, depth modDepth, roots []module.Version) (*ModuleGraph, error) {
+	if depth == lazy {
+		readModGraphDebugOnce.Do(func() {
+			for _, f := range strings.Split(os.Getenv("GODEBUG"), ",") {
+				switch f {
+				case "lazymod=log":
+					debug.PrintStack()
+					fmt.Fprintf(os.Stderr, "go: read full module graph.\n")
+				case "lazymod=strict":
+					debug.PrintStack()
+					base.Fatalf("go: read full module graph (forbidden by GODEBUG=lazymod=strict).")
+				}
+			}
+		})
+	}
+
+	var (
+		mu       sync.Mutex // guards mg.g and hasError during loading
+		hasError bool
+		mg       = &ModuleGraph{
+			g: mvs.NewGraph(cmpVersion, []module.Version{Target}),
+		}
+	)
+	mg.g.Require(Target, roots)
+
+	var (
+		loadQueue    = par.NewQueue(runtime.GOMAXPROCS(0))
+		loadingEager sync.Map // module.Version → nil; the set of modules that have been or are being loaded via eager roots
+	)
+
+	// loadOne synchronously loads the explicit requirements for module m.
+	// It does not load the transitive requirements of m even if the go version in
+	// m's go.mod file indicates eager loading.
+	loadOne := func(m module.Version) (*modFileSummary, error) {
+		cached := mg.loadCache.Do(m, func() interface{} {
+			summary, err := goModSummary(m)
+
+			mu.Lock()
+			if err == nil {
+				mg.g.Require(m, summary.require)
+			} else {
+				hasError = true
+			}
+			mu.Unlock()
+
+			return summaryError{summary, err}
+		}).(summaryError)
+
+		return cached.summary, cached.err
+	}
+
+	var enqueue func(m module.Version, depth modDepth)
+	enqueue = func(m module.Version, depth modDepth) {
+		if m.Version == "none" {
+			return
+		}
+
+		if depth == eager {
+			if _, dup := loadingEager.LoadOrStore(m, nil); dup {
+				// m has already been enqueued for loading. Since eager loading may
+				// follow cycles in the requirement graph, we need to return early
+				// to avoid making the load queue infinitely long.
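readModGraph above fans requirement loading out across a work queue and uses loadingEager.LoadOrStore as a visited set, which is what keeps cycles in the requirement graph from enqueueing work forever. A self-contained sketch of that dedup pattern; sync.Map plus a WaitGroup stand in for cmd/go's internal par.Queue:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// A requirement graph with a cycle: a -> b -> c -> a.
	requires := map[string][]string{
		"a": {"b"},
		"b": {"c"},
		"c": {"a"},
	}

	var (
		wg      sync.WaitGroup
		visited sync.Map // node -> nil; nodes already enqueued
	)

	var enqueue func(node string)
	enqueue = func(node string) {
		if _, dup := visited.LoadOrStore(node, nil); dup {
			return // already enqueued; without this, the cycle would loop forever
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("loaded", node)
			for _, dep := range requires[node] {
				enqueue(dep)
			}
		}()
	}

	enqueue("a")
	wg.Wait()
}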
+ return + } + } + + loadQueue.Add(func() { + summary, err := loadOne(m) + if err != nil { + return // findError will report the error later. + } + + // If the version in m's go.mod file implies eager loading, then we cannot + // assume that the explicit requirements of m (added by loadOne) are + // sufficient to build the packages it contains. We must load its full + // transitive dependency graph to be sure that we see all relevant + // dependencies. + if depth == eager || summary.depth == eager { + for _, r := range summary.require { + enqueue(r, eager) + } + } + }) + } + + for _, m := range roots { + enqueue(m, depth) + } + <-loadQueue.Idle() + + if hasError { + return mg, mg.findError() + } + return mg, nil +} + +// RequiredBy returns the dependencies required by module m in the graph, +// or ok=false if module m's dependencies are not relevant (such as if they +// are pruned out by lazy loading). +// +// The caller must not modify the returned slice, but may safely append to it +// and may rely on it not to be modified. +func (mg *ModuleGraph) RequiredBy(m module.Version) (reqs []module.Version, ok bool) { + return mg.g.RequiredBy(m) +} + +// Selected returns the selected version of the module with the given path. +// +// If no version is selected, Selected returns version "none". +func (mg *ModuleGraph) Selected(path string) (version string) { + return mg.g.Selected(path) +} + +// WalkBreadthFirst invokes f once, in breadth-first order, for each module +// version other than "none" that appears in the graph, regardless of whether +// that version is selected. +func (mg *ModuleGraph) WalkBreadthFirst(f func(m module.Version)) { + mg.g.WalkBreadthFirst(f) +} + +// BuildList returns the selected versions of all modules present in the graph, +// beginning with Target. +// +// The order of the remaining elements in the list is deterministic +// but arbitrary. +// +// The caller must not modify the returned list, but may safely append to it +// and may rely on it not to be modified. +func (mg *ModuleGraph) BuildList() []module.Version { + mg.buildListOnce.Do(func() { + mg.buildList = capVersionSlice(mg.g.BuildList()) + }) + return mg.buildList +} + +func (mg *ModuleGraph) findError() error { + errStack := mg.g.FindPath(func(m module.Version) bool { + cached := mg.loadCache.Get(m) + return cached != nil && cached.(summaryError).err != nil + }) + if len(errStack) > 0 { + err := mg.loadCache.Get(errStack[len(errStack)-1]).(summaryError).err + var noUpgrade func(from, to module.Version) bool + return mvs.NewBuildListError(err, errStack, noUpgrade) + } + + return nil +} + +func (mg *ModuleGraph) allRootsSelected() bool { + roots, _ := mg.g.RequiredBy(Target) + for _, m := range roots { + if mg.Selected(m.Path) != m.Version { + return false + } + } + return true +} + +// LoadModGraph loads and returns the graph of module dependencies of the main module, +// without loading any packages. +// +// Modules are loaded automatically (and lazily) in LoadPackages: +// LoadModGraph need only be called if LoadPackages is not, +// typically in commands that care about modules but no particular package. +func LoadModGraph(ctx context.Context) *ModuleGraph { + rs, mg, err := expandGraph(ctx, LoadModFile(ctx)) + if err != nil { + base.Fatalf("go: %v", err) + } + + commitRequirements(ctx, modFileGoVersion(), rs) + return mg +} + +// expandGraph loads the complete module graph from rs. 
+// +// If the complete graph reveals that some root of rs is not actually the +// selected version of its path, expandGraph computes a new set of roots that +// are consistent. (When lazy loading is implemented, this may result in +// upgrades to other modules due to requirements that were previously pruned +// out.) +// +// expandGraph returns the updated roots, along with the module graph loaded +// from those roots and any error encountered while loading that graph. +// expandGraph returns non-nil requirements and a non-nil graph regardless of +// errors. On error, the roots might not be updated to be consistent. +func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) { + mg, mgErr := rs.Graph(ctx) + if mgErr != nil { + // Without the graph, we can't update the roots: we don't know which + // versions of transitive dependencies would be selected. + return rs, mg, mgErr + } + + if !mg.allRootsSelected() { + // The roots of rs are not consistent with the rest of the graph. Update + // them. In an eager module this is a no-op for the build list as a whole — + // it just promotes what were previously transitive requirements to be + // roots — but in a lazy module it may pull in previously-irrelevant + // transitive dependencies. + + newRS, rsErr := updateRoots(ctx, rs.direct, rs, nil, nil) + if rsErr != nil { + // Failed to update roots, perhaps because of an error in a transitive + // dependency needed for the update. Return the original Requirements + // instead. + return rs, mg, rsErr + } + rs = newRS + mg, mgErr = rs.Graph(ctx) + } + + return rs, mg, mgErr } // EditBuildList edits the global build list by first adding every module in add @@ -81,111 +469,17 @@ func Selected(path string) (version string) { // If the versions listed in mustSelect are mutually incompatible (due to one of // the listed modules requiring a higher version of another), EditBuildList // returns a *ConstraintError and leaves the build list in its previous state. -func EditBuildList(ctx context.Context, add, mustSelect []module.Version) error { - var upgraded = capVersionSlice(buildList) - if len(add) > 0 { - // First, upgrade the build list with any additions. - // In theory we could just append the additions to the build list and let - // mvs.Downgrade take care of resolving the upgrades too, but the - // diagnostics from Upgrade are currently much better in case of errors. - var err error - upgraded, err = mvs.Upgrade(Target, &mvsReqs{buildList: upgraded}, add...) - if err != nil { - return err - } - } - - downgraded, err := mvs.Downgrade(Target, &mvsReqs{buildList: append(upgraded, mustSelect...)}, mustSelect...) +// +// On success, EditBuildList reports whether the selected version of any module +// in the build list may have been changed (possibly to or from "none") as a +// result. +func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) { + rs, changed, err := editRequirements(ctx, LoadModFile(ctx), add, mustSelect) if err != nil { - return err - } - - final, err := mvs.Upgrade(Target, &mvsReqs{buildList: downgraded}, mustSelect...) 
- if err != nil { - return err - } - - selected := make(map[string]module.Version, len(final)) - for _, m := range final { - selected[m.Path] = m - } - inconsistent := false - for _, m := range mustSelect { - s, ok := selected[m.Path] - if !ok { - if m.Version != "none" { - panic(fmt.Sprintf("internal error: mvs.BuildList lost %v", m)) - } - continue - } - if s.Version != m.Version { - inconsistent = true - break - } - } - - if !inconsistent { - buildList = final - additionalExplicitRequirements = make([]string, 0, len(mustSelect)) - for _, m := range mustSelect { - if m.Version != "none" { - additionalExplicitRequirements = append(additionalExplicitRequirements, m.Path) - } - } - return nil - } - - // We overshot one or more of the modules in mustSelected, which means that - // Downgrade removed something in mustSelect because it conflicted with - // something else in mustSelect. - // - // Walk the requirement graph to find the conflict. - // - // TODO(bcmills): Ideally, mvs.Downgrade (or a replacement for it) would do - // this directly. - - reqs := &mvsReqs{buildList: final} - reason := map[module.Version]module.Version{} - for _, m := range mustSelect { - reason[m] = m - } - queue := mustSelect[:len(mustSelect):len(mustSelect)] - for len(queue) > 0 { - var m module.Version - m, queue = queue[0], queue[1:] - required, err := reqs.Required(m) - if err != nil { - return err - } - for _, r := range required { - if _, ok := reason[r]; !ok { - reason[r] = reason[m] - queue = append(queue, r) - } - } - } - - var conflicts []Conflict - for _, m := range mustSelect { - s, ok := selected[m.Path] - if !ok { - if m.Version != "none" { - panic(fmt.Sprintf("internal error: mvs.BuildList lost %v", m)) - } - continue - } - if s.Version != m.Version { - conflicts = append(conflicts, Conflict{ - Source: reason[s], - Dep: s, - Constraint: m, - }) - } - } - - return &ConstraintError{ - Conflicts: conflicts, + return false, err } + commitRequirements(ctx, modFileGoVersion(), rs) + return changed, err } // A ConstraintError describes inconsistent constraints in EditBuildList @@ -213,66 +507,569 @@ type Conflict struct { Constraint module.Version } -// ReloadBuildList resets the state of loaded packages, then loads and returns -// the build list set by EditBuildList. -func ReloadBuildList() []module.Version { - loaded = loadFromRoots(loaderParams{ - PackageOpts: PackageOpts{ - Tags: imports.Tags(), - }, - listRoots: func() []string { return nil }, - allClosesOverTests: index.allPatternClosesOverTests(), // but doesn't matter because the root list is empty. - }) - return capVersionSlice(buildList) +// tidyRoots trims the root dependencies to the minimal requirements needed to +// both retain the same versions of all packages in pkgs and satisfy the +// lazy loading invariants (if applicable). +func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { + if rs.depth == eager { + return tidyEagerRoots(ctx, rs.direct, pkgs) + } + return tidyLazyRoots(ctx, rs.direct, pkgs) } -// TidyBuildList trims the build list to the minimal requirements needed to -// retain the same versions of all packages from the preceding call to -// LoadPackages. 
-func TidyBuildList() {
-	used := map[module.Version]bool{Target: true}
-	for _, pkg := range loaded.pkgs {
-		used[pkg.mod] = true
+func updateRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version) (*Requirements, error) {
+	if rs.depth == eager {
+		return updateEagerRoots(ctx, direct, rs, add)
 	}
-
-	keep := []module.Version{Target}
-	var direct []string
-	for _, m := range buildList[1:] {
-		if used[m] {
-			keep = append(keep, m)
-			if loaded.direct[m.Path] {
-				direct = append(direct, m.Path)
-			}
-		} else if cfg.BuildV {
-			if _, ok := index.require[m]; ok {
-				fmt.Fprintf(os.Stderr, "unused %s\n", m.Path)
-			}
-		}
-	}
-
-	min, err := mvs.Req(Target, direct, &mvsReqs{buildList: keep})
-	if err != nil {
-		base.Fatalf("go: %v", err)
-	}
-	buildList = append([]module.Version{Target}, min...)
+	return updateLazyRoots(ctx, direct, rs, pkgs, add)
 }

-// checkMultiplePaths verifies that a given module path is used as itself
-// or as a replacement for another module, but not both at the same time.
+// tidyLazyRoots returns a minimal set of root requirements that maintains the
+// "lazy loading" invariants of the go.mod file for the given packages:
 //
-// (See https://golang.org/issue/26607 and https://golang.org/issue/34650.)
-func checkMultiplePaths() {
-	firstPath := make(map[module.Version]string, len(buildList))
-	for _, mod := range buildList {
-		src := mod
-		if rep := Replacement(mod); rep.Path != "" {
-			src = rep
+// 1. For each package marked with pkgInAll, the module path that provided that
+//    package is included as a root.
+// 2. For all packages, the module that provided that package either remains
+//    selected at the same version or is upgraded by the dependencies of a
+//    root.
+//
+// If any module that provided a package has been upgraded above its previous
+// version, the caller may need to reload and recompute the package graph.
+//
+// To ensure that the loading process eventually converges, the caller should
+// add any needed roots from the tidy root set (without removing existing untidy
+// roots) until the set of roots has converged.
+func tidyLazyRoots(ctx context.Context, direct map[string]bool, pkgs []*loadPkg) (*Requirements, error) {
+	var (
+		roots        []module.Version
+		pathIncluded = map[string]bool{Target.Path: true}
+	)
+	// We start by adding roots for every package in "all".
+	//
+	// Once that is done, we may still need to add more roots to cover upgraded or
+	// otherwise-missing test dependencies for packages in "all". For those test
+	// dependencies, we prefer to add roots for packages with shorter import
+	// stacks first, on the theory that the module requirements for those will
+	// tend to fill in the requirements for their transitive imports (which have
+	// deeper import stacks). So we add the missing dependencies for one depth at
+	// a time, starting with the packages actually in "all" and expanding outwards
+	// until we have scanned every package that was loaded.
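	// (For concreteness, under a hypothetical import graph: if package a in
	// "all" imports b, and b's external test imports c, the first pass below
	// adds roots for the modules of a and b and queues both packages; the next
	// pass queues b's test; and the pass after that adds a root for c's module
	// only if the graph of the roots gathered so far selects a lower version
	// of it than the one c was loaded from.)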
+ var ( + queue []*loadPkg + queued = map[*loadPkg]bool{} + ) + for _, pkg := range pkgs { + if !pkg.flags.has(pkgInAll) { + continue } - if prev, ok := firstPath[src]; !ok { - firstPath[src] = mod.Path - } else if prev != mod.Path { - base.Errorf("go: %s@%s used for two different module paths (%s and %s)", src.Path, src.Version, prev, mod.Path) + if pkg.fromExternalModule() && !pathIncluded[pkg.mod.Path] { + roots = append(roots, pkg.mod) + pathIncluded[pkg.mod.Path] = true + } + queue = append(queue, pkg) + queued[pkg] = true + } + module.Sort(roots) + tidy := newRequirements(lazy, roots, direct) + + for len(queue) > 0 { + roots = tidy.rootModules + mg, err := tidy.Graph(ctx) + if err != nil { + return nil, err + } + + prevQueue := queue + queue = nil + for _, pkg := range prevQueue { + m := pkg.mod + if m.Path == "" { + continue + } + for _, dep := range pkg.imports { + if !queued[dep] { + queue = append(queue, dep) + queued[dep] = true + } + } + if pkg.test != nil && !queued[pkg.test] { + queue = append(queue, pkg.test) + queued[pkg.test] = true + } + if !pathIncluded[m.Path] { + if s := mg.Selected(m.Path); cmpVersion(s, m.Version) < 0 { + roots = append(roots, m) + } + pathIncluded[m.Path] = true + } + } + + if len(roots) > len(tidy.rootModules) { + module.Sort(roots) + tidy = newRequirements(lazy, roots, tidy.direct) } } - base.ExitIfErrors() + + _, err := tidy.Graph(ctx) + if err != nil { + return nil, err + } + return tidy, nil +} + +// updateLazyRoots returns a set of root requirements that maintains the “lazy +// loading” invariants of the go.mod file: +// +// 1. The selected version of the module providing each package marked with +// either pkgInAll or pkgIsRoot is included as a root. +// Note that certain root patterns (such as '...') may explode the root set +// to contain every module that provides any package imported (or merely +// required) by any other module. +// 2. Each root appears only once, at the selected version of its path +// (if rs.graph is non-nil) or at the highest version otherwise present as a +// root (otherwise). +// 3. Every module path that appears as a root in rs remains a root. +// 4. Every version in add is selected at its given version unless upgraded by +// (the dependencies of) an existing root or another module in add. +// +// The packages in pkgs are assumed to have been loaded from either the roots of +// rs or the modules selected in the graph of rs. +// +// The above invariants together imply the “lazy loading” invariants for the +// go.mod file: +// +// 1. (The import invariant.) Every module that provides a package transitively +// imported by any package or test in the main module is included as a root. +// This follows by induction from (1) and (3) above. Transitively-imported +// packages loaded during this invocation are marked with pkgInAll (1), +// and by hypothesis any transitively-imported packages loaded in previous +// invocations were already roots in rs (3). +// +// 2. (The argument invariant.) Every module that provides a package matching +// an explicit package pattern is included as a root. This follows directly +// from (1): packages matching explicit package patterns are marked with +// pkgIsRoot. +// +// 3. (The completeness invariant.) Every module that contributed any package +// to the build is required by either the main module or one of the modules +// it requires explicitly. 
This invariant is left up to the caller, who must
+//    not load packages from outside the module graph but may add roots to the
+//    graph; it is facilitated by (3). If the caller adds roots to the graph in
+//    order to resolve missing packages, then updateLazyRoots will retain them,
+//    the selected versions of those roots cannot regress, and they will
+//    eventually be written back to the main module's go.mod file.
+//
+// (See https://golang.org/design/36460-lazy-module-loading#invariants for more
+// detail.)
+func updateLazyRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version) (*Requirements, error) {
+	roots := rs.rootModules
+	rootsUpgraded := false
+
+	spotCheckRoot := map[module.Version]bool{}
+
+	// “The selected version of the module providing each package marked with
+	// either pkgInAll or pkgIsRoot is included as a root.”
+	needSort := false
+	for _, pkg := range pkgs {
+		if !pkg.fromExternalModule() {
+			// pkg was not loaded from a module dependency, so we don't need
+			// to do anything special to maintain that dependency.
+			continue
+		}
+
+		switch {
+		case pkg.flags.has(pkgInAll):
+			// pkg is transitively imported by a package or test in the main module.
+			// We need to promote the module that maintains it to a root: if some
+			// other module depends on the main module, and that other module also
+			// uses lazy loading, it will expect to find all of our transitive
+			// dependencies by reading just our go.mod file, not the go.mod files of
+			// everything we depend on.
+			//
+			// (This is the “import invariant” that makes lazy loading possible.)
+
+		case pkg.flags.has(pkgIsRoot):
+			// pkg is a root of the package-import graph. (Generally this means that
+			// it matches a command-line argument.) We want future invocations of the
+			// 'go' command — such as 'go test' on the same package — to continue to
+			// use the same versions of its dependencies that we are using right now.
+			// So we need to bring this package's dependencies inside the lazy-loading
+			// horizon.
+			//
+			// Making the module containing this package a root of the module graph
+			// does exactly that: if the module containing the package is lazy it
+			// should satisfy the import invariant itself, so all of its dependencies
+			// should be in its go.mod file, and if the module containing the package
+			// is eager then if we make it a root we will load all of its transitive
+			// dependencies into the module graph.
+			//
+			// (This is the “argument invariant” of lazy loading, and is important for
+			// reproducibility.)
+
+		default:
+			// pkg is a dependency of some other package outside of the main module.
+			// As far as we know it's not relevant to the main module (and thus not
+			// relevant to consumers of the main module either), and its dependencies
+			// should already be in the module graph — included in the dependencies of
+			// the package that imported it.
+			continue
+		}
+
+		if _, ok := rs.rootSelected(pkg.mod.Path); ok {
+			// It is possible that the main module's go.mod file is incomplete or
+			// otherwise erroneous — for example, perhaps the author forgot to 'git
+			// add' their updated go.mod file after adding a new package import, or
+			// perhaps they made an edit to the go.mod file using a third-party tool
+			// ('git merge'?) that doesn't maintain consistency for module
+			// dependencies. If that happens, ideally we want to detect the missing
+			// requirements and fix them up here.
+			//
+			// However, we also need to be careful not to be too aggressive. For
+			// transitive dependencies of external tests, the go.mod file for the
+			// module containing the test itself is expected to provide all of the
+			// relevant dependencies, and we explicitly don't want to pull in
+			// requirements on *irrelevant* requirements that happen to occur in the
+			// go.mod files for these transitive-test-only dependencies. (See the test
+			// in mod_lazy_test_horizon.txt for a concrete example.)
+			//
+			// The “goldilocks zone” seems to be to spot-check exactly the same
+			// modules that we promote to explicit roots: namely, those that provide
+			// packages transitively imported by the main module, and those that
+			// provide roots of the package-import graph. That will catch erroneous
+			// edits to the main module's go.mod file and inconsistent requirements in
+			// dependencies that provide imported packages, but will ignore erroneous
+			// or misleading requirements in dependencies that aren't obviously
+			// relevant to the packages in the main module.
+			spotCheckRoot[pkg.mod] = true
+		} else {
+			roots = append(roots, pkg.mod)
+			rootsUpgraded = true
+			// The roots slice was initially sorted because rs.rootModules was sorted,
+			// but the root we just added could be out of order.
+			needSort = true
+		}
+	}
+
+	for _, m := range add {
+		if v, ok := rs.rootSelected(m.Path); !ok || cmpVersion(v, m.Version) < 0 {
+			roots = append(roots, m)
+			rootsUpgraded = true
+			needSort = true
+		}
+	}
+	if needSort {
+		module.Sort(roots)
+	}
+
+	// “Each root appears only once, at the selected version of its path ….”
+	for {
+		var mg *ModuleGraph
+		if rootsUpgraded {
+			// We've added or upgraded one or more roots, so load the full module
+			// graph so that we can update those roots to be consistent with other
+			// requirements.
+			if cfg.BuildMod != "mod" {
+				// Our changes to the roots may have moved dependencies into or out of
+				// the lazy-loading horizon, which could in turn change the selected
+				// versions of other modules. (Unlike for eager modules, for lazy
+				// modules adding or removing an explicit root is a semantic change, not
+				// just a cosmetic one.)
+				return rs, errGoModDirty
+			}
+
+			rs = newRequirements(lazy, roots, direct)
+			var err error
+			mg, err = rs.Graph(ctx)
+			if err != nil {
+				return rs, err
+			}
+		} else {
+			// Since none of the roots have been upgraded, we have no reason to
+			// suspect that they are inconsistent with the requirements of any other
+			// roots. Only look at the full module graph if we've already loaded it;
+			// otherwise, just spot-check the explicit requirements of the roots from
+			// which we loaded packages.
+			if rs.graph.Load() != nil {
+				// We've already loaded the full module graph, which includes the
+				// requirements of all of the root modules — even the transitive
+				// requirements, if they are eager!
+				mg, _ = rs.Graph(ctx)
+			} else if cfg.BuildMod == "vendor" {
+				// We can't spot-check the requirements of other modules because we
+				// don't in general have their go.mod files available in the vendor
+				// directory. (Fortunately this case is impossible, because mg.graph is
+				// always non-nil in vendor mode!)
+				panic("internal error: rs.graph is unexpectedly nil with -mod=vendor")
+			} else if !spotCheckRoots(ctx, rs, spotCheckRoot) {
+				// We spot-checked the explicit requirements of the roots that are
+				// relevant to the packages we've loaded. Unfortunately, they're
+				// inconsistent in some way; we need to load the full module graph
+				// so that we can fix the roots properly.
+				var err error
+				mg, err = rs.Graph(ctx)
+				if err != nil {
+					return rs, err
+				}
+			}
+		}
+
+		roots = make([]module.Version, 0, len(rs.rootModules))
+		rootsUpgraded = false
+		inRootPaths := make(map[string]bool, len(rs.rootModules))
+		for _, m := range rs.rootModules {
+			if inRootPaths[m.Path] {
+				// This root specifies a redundant path. We already retained the
+				// selected version of this path when we saw it before, so omit the
+				// redundant copy regardless of its version.
+				//
+				// When we read the full module graph, we include the dependencies of
+				// every root even if that root is redundant. That better preserves
+				// reproducibility if, say, some automated tool adds a redundant
+				// 'require' line and then runs 'go mod tidy' to try to make everything
+				// consistent, since the requirements of the older version are carried
+				// over.
+				//
+				// So omitting a root that was previously present may *reduce* the
+				// selected versions of non-roots, but merely removing a requirement
+				// cannot *increase* the selected versions of other roots as a result —
+				// we don't need to mark this change as an upgrade. (This particular
+				// change cannot invalidate any other roots.)
+				continue
+			}
+
+			var v string
+			if mg == nil {
+				v, _ = rs.rootSelected(m.Path)
+			} else {
+				v = mg.Selected(m.Path)
+			}
+			roots = append(roots, module.Version{Path: m.Path, Version: v})
+			inRootPaths[m.Path] = true
+			if v != m.Version {
+				rootsUpgraded = true
+			}
+		}
+		// Note that rs.rootModules was already sorted by module path and version,
+		// and we appended to the roots slice in the same order and guaranteed that
+		// each path has only one version, so roots is also sorted by module path
+		// and (trivially) version.
+
+		if !rootsUpgraded {
+			// The root set has converged: every root going into this iteration was
+			// already at its selected version, although we may have removed other
+			// (redundant) roots for the same path.
+			break
+		}
+	}
+
+	if rs.depth == lazy && reflect.DeepEqual(roots, rs.rootModules) && reflect.DeepEqual(direct, rs.direct) {
+		// The root set is unchanged and rs was already lazy, so keep rs to
+		// preserve its cached ModuleGraph (if any).
+		return rs, nil
+	}
+	return newRequirements(lazy, roots, direct), nil
+}
+
+// spotCheckRoots reports whether the versions of the roots in rs satisfy the
+// explicit requirements of the modules in mods.
+func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	work := par.NewQueue(runtime.GOMAXPROCS(0))
+	for m := range mods {
+		m := m
+		work.Add(func() {
+			if ctx.Err() != nil {
+				return
+			}
+
+			summary, err := goModSummary(m)
+			if err != nil {
+				cancel()
+				return
+			}
+
+			for _, r := range summary.require {
+				if v, ok := rs.rootSelected(r.Path); ok && cmpVersion(v, r.Version) < 0 {
+					cancel()
+					return
+				}
+			}
+		})
+	}
+	<-work.Idle()
+
+	if ctx.Err() != nil {
+		// Either we failed a spot-check, or the caller no longer cares about our
+		// answer anyway.
+		return false
+	}
+
+	return true
+}
+
+// tidyEagerRoots returns a minimal set of root requirements that maintains the
+// selected version of every module that provided a package in pkgs, and
+// includes the selected version of every such module in direct as a root.
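Since par.NewQueue and goModSummary are internal to cmd/go, the cancel-on-first-failure pattern that spotCheckRoots uses above can only be sketched stand-alone. A minimal approximation, with a sync.WaitGroup in place of the work queue and a caller-supplied check in place of the go.mod summary lookup:

	package main

	import (
		"context"
		"fmt"
		"sync"
	)

	// allPass runs every check in parallel, cancels the shared context on the
	// first failure, and reports success only if no check canceled.
	func allPass(ctx context.Context, checks []func(context.Context) bool) bool {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		var wg sync.WaitGroup
		for _, check := range checks {
			check := check
			wg.Add(1)
			go func() {
				defer wg.Done()
				if ctx.Err() != nil {
					return // some other check already failed; skip the work
				}
				if !check(ctx) {
					cancel() // fail fast: short-circuit the remaining checks
				}
			}()
		}
		wg.Wait()

		// Either every check passed, or ctx records the first cancellation.
		return ctx.Err() == nil
	}

	func main() {
		ok := allPass(context.Background(), []func(context.Context) bool{
			func(context.Context) bool { return true },
			func(context.Context) bool { return false },
		})
		fmt.Println(ok) // false
	}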
+func tidyEagerRoots(ctx context.Context, direct map[string]bool, pkgs []*loadPkg) (*Requirements, error) { + var ( + keep []module.Version + keptPath = map[string]bool{} + ) + var ( + rootPaths []string // module paths that should be included as roots + inRootPaths = map[string]bool{} + ) + for _, pkg := range pkgs { + if !pkg.fromExternalModule() { + continue + } + if m := pkg.mod; !keptPath[m.Path] { + keep = append(keep, m) + keptPath[m.Path] = true + if direct[m.Path] && !inRootPaths[m.Path] { + rootPaths = append(rootPaths, m.Path) + inRootPaths[m.Path] = true + } + } + } + + min, err := mvs.Req(Target, rootPaths, &mvsReqs{roots: keep}) + if err != nil { + return nil, err + } + return newRequirements(eager, min, direct), nil +} + +// updateEagerRoots returns a set of root requirements that includes the selected +// version of every module path in direct as a root, and maintains the selected +// version of every module selected in the graph of rs. +// +// The roots are updated such that: +// +// 1. The selected version of every module path in direct is included as a root +// (if it is not "none"). +// 2. Each root is the selected version of its path. (We say that such a root +// set is “consistent”.) +// 3. Every version selected in the graph of rs remains selected unless upgraded +// by a dependency in add. +// 4. Every version in add is selected at its given version unless upgraded by +// (the dependencies of) an existing root or another module in add. +func updateEagerRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { + mg, err := rs.Graph(ctx) + if err != nil { + // We can't ignore errors in the module graph even if the user passed the -e + // flag to try to push past them. If we can't load the complete module + // dependencies, then we can't reliably compute a minimal subset of them. + return rs, err + } + + if cfg.BuildMod != "mod" { + // Instead of actually updating the requirements, just check that no updates + // are needed. + if rs == nil { + // We're being asked to reconstruct the requirements from scratch, + // but we aren't even allowed to modify them. + return rs, errGoModDirty + } + for _, m := range rs.rootModules { + if m.Version != mg.Selected(m.Path) { + // The root version v is misleading: the actual selected version is higher. + return rs, errGoModDirty + } + } + for _, m := range add { + if m.Version != mg.Selected(m.Path) { + return rs, errGoModDirty + } + } + for mPath := range direct { + if _, ok := rs.rootSelected(mPath); !ok { + // Module m is supposed to be listed explicitly, but isn't. + // + // Note that this condition is also detected (and logged with more + // detail) earlier during package loading, so it shouldn't actually be + // possible at this point — this is just a defense in depth. + return rs, errGoModDirty + } + } + + // No explicit roots are missing and all roots are already at the versions + // we want to keep. Any other changes we would make are purely cosmetic, + // such as pruning redundant indirect dependencies. Per issue #34822, we + // ignore cosmetic changes when we cannot update the go.mod file. + return rs, nil + } + + var ( + rootPaths []string // module paths that should be included as roots + inRootPaths = map[string]bool{} + ) + for _, root := range rs.rootModules { + // If the selected version of the root is the same as what was already + // listed in the go.mod file, retain it as a root (even if redundant) to + // avoid unnecessary churn. 
(See https://golang.org/issue/34822.) + // + // We do this even for indirect requirements, since we don't know why they + // were added and they could become direct at any time. + if !inRootPaths[root.Path] && mg.Selected(root.Path) == root.Version { + rootPaths = append(rootPaths, root.Path) + inRootPaths[root.Path] = true + } + } + + // “The selected version of every module path in direct is included as a root.” + // + // This is only for convenience and clarity for end users: in an eager module, + // the choice of explicit vs. implicit dependency has no impact on MVS + // selection (for itself or any other module). + keep := append(mg.BuildList()[1:], add...) + for _, m := range keep { + if direct[m.Path] && !inRootPaths[m.Path] { + rootPaths = append(rootPaths, m.Path) + inRootPaths[m.Path] = true + } + } + + min, err := mvs.Req(Target, rootPaths, &mvsReqs{roots: keep}) + if err != nil { + return rs, err + } + if rs.depth == eager && reflect.DeepEqual(min, rs.rootModules) && reflect.DeepEqual(direct, rs.direct) { + // The root set is unchanged and rs was already eager, so keep rs to + // preserve its cached ModuleGraph (if any). + return rs, nil + } + return newRequirements(eager, min, direct), nil +} + +// convertDepth returns a version of rs with the given depth. +// If rs already has the given depth, convertDepth returns rs unmodified. +func convertDepth(ctx context.Context, rs *Requirements, depth modDepth) (*Requirements, error) { + if rs.depth == depth { + return rs, nil + } + + if depth == eager { + // We are converting a lazy module to an eager one. The roots of an eager + // module graph are a superset of the roots of a lazy graph, so we don't + // need to add any new roots — we just need to prune away the ones that are + // redundant given eager loading, which is exactly what updateEagerRoots + // does. + return updateEagerRoots(ctx, rs.direct, rs, nil) + } + + // We are converting an eager module to a lazy one. The module graph of an + // eager module includes the transitive dependencies of every module in the + // build list. + // + // Hey, we can express that as a lazy root set! “Include the transitive + // dependencies of every module in the build list” is exactly what happens in + // a lazy module if we promote every module in the build list to a root! + mg, err := rs.Graph(ctx) + if err != nil { + return rs, err + } + return newRequirements(lazy, mg.BuildList()[1:], rs.direct), nil } diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go new file mode 100644 index 00000000000..c350b9d1b5c --- /dev/null +++ b/src/cmd/go/internal/modload/edit.go @@ -0,0 +1,569 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "cmd/go/internal/mvs" + "context" + "reflect" + "sort" + + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +// editRequirements returns an edited version of rs such that: +// +// 1. Each module version in mustSelect is selected. +// +// 2. Each module version in tryUpgrade is upgraded toward the indicated +// version as far as can be done without violating (1). +// +// 3. Each module version in rs.rootModules (or rs.graph, if rs.depth is eager) +// is downgraded from its original version only to the extent needed to +// satisfy (1), or upgraded only to the extent needed to satisfy (1) and +// (2). +// +// 4. 
No module is upgraded above the maximum version of its path found in the +// dependency graph of rs, the combined dependency graph of the versions in +// mustSelect, or the dependencies of each individual module version in +// tryUpgrade. +// +// Generally, the module versions in mustSelect are due to the module or a +// package within the module matching an explicit command line argument to 'go +// get', and the versions in tryUpgrade are transitive dependencies that are +// either being upgraded by 'go get -u' or being added to satisfy some +// otherwise-missing package import. +func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) { + limiter, err := limiterForEdit(ctx, rs, tryUpgrade, mustSelect) + if err != nil { + return rs, false, err + } + + var conflicts []Conflict + for _, m := range mustSelect { + conflict, err := limiter.Select(m) + if err != nil { + return rs, false, err + } + if conflict.Path != "" { + conflicts = append(conflicts, Conflict{ + Source: m, + Dep: conflict, + Constraint: module.Version{ + Path: conflict.Path, + Version: limiter.max[conflict.Path], + }, + }) + } + } + if len(conflicts) > 0 { + return rs, false, &ConstraintError{Conflicts: conflicts} + } + + mods, changed, err := selectPotentiallyImportedModules(ctx, limiter, rs, tryUpgrade) + if err != nil { + return rs, false, err + } + + var roots []module.Version + if rs.depth == eager { + // In an eager module, modules that provide packages imported by the main + // module may either be explicit roots or implicit transitive dependencies. + // We promote the modules in mustSelect to be explicit requirements. + var rootPaths []string + for _, m := range mustSelect { + if m.Version != "none" && m.Path != Target.Path { + rootPaths = append(rootPaths, m.Path) + } + } + if !changed && len(rootPaths) == 0 { + // The build list hasn't changed and we have no new roots to add. + // We don't need to recompute the minimal roots for the module. + return rs, false, nil + } + + for _, m := range mods { + if v, ok := rs.rootSelected(m.Path); ok && (v == m.Version || rs.direct[m.Path]) { + // m.Path was formerly a root, and either its version hasn't changed or + // we believe that it provides a package directly imported by a package + // or test in the main module. For now we'll assume that it is still + // relevant enough to remain a root. If we actually load all of the + // packages and tests in the main module (which we are not doing here), + // we can revise the explicit roots at that point. + rootPaths = append(rootPaths, m.Path) + } + } + + roots, err = mvs.Req(Target, rootPaths, &mvsReqs{roots: mods}) + if err != nil { + return nil, false, err + } + } else { + // In a lazy module, every module that provides a package imported by the + // main module must be retained as a root. + roots = mods + if !changed { + // Because the roots we just computed are unchanged, the entire graph must + // be the same as it was before. Save the original rs, since we have + // probably already loaded its requirement graph. + return rs, false, nil + } + } + + // A module that is not even in the build list necessarily cannot provide + // any imported packages. Mark as direct only the direct modules that are + // still in the build list. + // + // TODO(bcmills): Would it make more sense to leave the direct map as-is + // but allow it to refer to modules that are no longer in the build list? 
+ // That might complicate updateRoots, but it may be cleaner in other ways. + direct := make(map[string]bool, len(rs.direct)) + for _, m := range roots { + if rs.direct[m.Path] { + direct[m.Path] = true + } + } + return newRequirements(rs.depth, roots, direct), changed, nil +} + +// limiterForEdit returns a versionLimiter with its max versions set such that +// the max version for every module path in mustSelect is the version listed +// there, and the max version for every other module path is the maximum version +// of its path found in the dependency graph of rs, the combined dependency +// graph of the versions in mustSelect, or the dependencies of each individual +// module version in tryUpgrade. +func limiterForEdit(ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (*versionLimiter, error) { + mg, err := rs.Graph(ctx) + if err != nil { + return nil, err + } + + maxVersion := map[string]string{} // module path → version + restrictTo := func(m module.Version) { + v, ok := maxVersion[m.Path] + if !ok || cmpVersion(v, m.Version) > 0 { + maxVersion[m.Path] = m.Version + } + } + + if rs.depth == eager { + // Eager go.mod files don't indicate which transitive dependencies are + // actually relevant to the main module, so we have to assume that any module + // that could have provided any package — that is, any module whose selected + // version was not "none" — may be relevant. + for _, m := range mg.BuildList() { + restrictTo(m) + } + } else { + // The go.mod file explicitly records every module that provides a package + // imported by the main module. + // + // If we need to downgrade an existing root or a new root found in + // tryUpgrade, we don't want to allow that downgrade to incidentally upgrade + // a module imported by the main module to some arbitrary version. + // However, we don't particularly care about arbitrary upgrades to modules + // that are (at best) only providing packages imported by tests of + // dependencies outside the main module. + for _, m := range rs.rootModules { + restrictTo(module.Version{ + Path: m.Path, + Version: mg.Selected(m.Path), + }) + } + } + + if err := raiseLimitsForUpgrades(ctx, maxVersion, rs.depth, tryUpgrade, mustSelect); err != nil { + return nil, err + } + + // The versions in mustSelect override whatever we would naively select — + // we will downgrade other modules as needed in order to meet them. + for _, m := range mustSelect { + restrictTo(m) + } + + return newVersionLimiter(rs.depth, maxVersion), nil +} + +// raiseLimitsForUpgrades increases the module versions in maxVersions to the +// versions that would be needed to allow each of the modules in tryUpgrade +// (individually) and all of the modules in mustSelect (simultaneously) to be +// added as roots. +// +// Versions not present in maxVersion are unrestricted, and it is assumed that +// they will not be promoted to root requirements (and thus will not contribute +// their own dependencies if the main module is lazy). +// +// These limits provide an upper bound on how far a module may be upgraded as +// part of an incidental downgrade, if downgrades are needed in order to select +// the versions in mustSelect. +func raiseLimitsForUpgrades(ctx context.Context, maxVersion map[string]string, depth modDepth, tryUpgrade []module.Version, mustSelect []module.Version) error { + // allow raises the limit for m.Path to at least m.Version. + // If m.Path was already unrestricted, it remains unrestricted. 
+ allow := func(m module.Version) { + v, ok := maxVersion[m.Path] + if !ok { + return // m.Path is unrestricted. + } + if cmpVersion(v, m.Version) < 0 { + maxVersion[m.Path] = m.Version + } + } + + var eagerUpgrades []module.Version + if depth == eager { + eagerUpgrades = tryUpgrade + } else { + for _, m := range tryUpgrade { + if m.Path == Target.Path { + // Target is already considered to be higher than any possible m, so we + // won't be upgrading to it anyway and there is no point scanning its + // dependencies. + continue + } + + summary, err := goModSummary(m) + if err != nil { + return err + } + if summary.depth == eager { + // For efficiency, we'll load all of the eager upgrades as one big + // graph, rather than loading the (potentially-overlapping) subgraph for + // each upgrade individually. + eagerUpgrades = append(eagerUpgrades, m) + continue + } + + for _, r := range summary.require { + allow(r) + } + } + } + + if len(eagerUpgrades) > 0 { + // Compute the max versions for eager upgrades all together. + // Since these modules are eager, we'll end up scanning all of their + // transitive dependencies no matter which versions end up selected, + // and since we have a large dependency graph to scan we might get + // a significant benefit from not revisiting dependencies that are at + // common versions among multiple upgrades. + upgradeGraph, err := readModGraph(ctx, eager, eagerUpgrades) + if err != nil { + if go117LazyTODO { + // Compute the requirement path from a module path in tryUpgrade to the + // error, and the requirement path (if any) from rs.rootModules to the + // tryUpgrade module path. Return a *mvs.BuildListError showing the + // concatenation of the paths (with an upgrade in the middle). + } + return err + } + + for _, r := range upgradeGraph.BuildList() { + // Upgrading to m would upgrade to r, and the caller requested that we + // try to upgrade to m, so it's ok to upgrade to r. + allow(r) + } + } + + if len(mustSelect) > 0 { + mustGraph, err := readModGraph(ctx, depth, mustSelect) + if err != nil { + return err + } + + for _, r := range mustGraph.BuildList() { + // Some module in mustSelect requires r, so we must allow at least r.Version + // unless it conflicts with an entry in mustSelect. + allow(r) + } + } + + return nil +} + +// selectPotentiallyImportedModules increases the limiter-selected version of +// every module in rs that potentially provides a package imported (directly or +// indirectly) by the main module, and every module in tryUpgrade, toward the +// highest version seen in rs or tryUpgrade, but not above the maximums enforced +// by the limiter. +// +// It returns the list of module versions selected by the limiter, sorted by +// path, along with a boolean indicating whether that list is different from the +// list of modules read from rs. 
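For intuition about the cap maps above: restrictTo (in limiterForEdit) only ever lowers a path's cap, allow (in raiseLimitsForUpgrades) only ever raises it, and a path absent from the map stays unrestricted. A stand-alone sketch, using semver.Compare in place of the unexported cmpVersion (so "+incompatible" ordering is not modeled) and made-up module paths:

	package main

	import (
		"fmt"

		"golang.org/x/mod/semver"
	)

	func main() {
		max := map[string]string{} // module path → maximum allowed version

		restrictTo := func(path, v string) {
			if old, ok := max[path]; !ok || semver.Compare(old, v) > 0 {
				max[path] = v // take the minimum of all caps seen
			}
		}
		allow := func(path, v string) {
			old, ok := max[path]
			if !ok {
				return // unrestricted paths stay unrestricted
			}
			if semver.Compare(old, v) < 0 {
				max[path] = v // raise the cap to at least v
			}
		}

		restrictTo("example.com/a", "v1.4.0")
		restrictTo("example.com/a", "v1.2.0") // lowers the cap
		allow("example.com/a", "v1.3.0")      // raises it again
		allow("example.com/b", "v1.0.0")      // no-op: b is unrestricted

		fmt.Println(max) // map[example.com/a:v1.3.0]
	}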
+func selectPotentiallyImportedModules(ctx context.Context, limiter *versionLimiter, rs *Requirements, tryUpgrade []module.Version) (mods []module.Version, changed bool, err error) {
+	for _, m := range tryUpgrade {
+		if err := limiter.UpgradeToward(ctx, m); err != nil {
+			return nil, false, err
+		}
+	}
+
+	var initial []module.Version
+	if rs.depth == eager {
+		mg, err := rs.Graph(ctx)
+		if err != nil {
+			return nil, false, err
+		}
+		initial = mg.BuildList()[1:]
+	} else {
+		initial = rs.rootModules
+	}
+	for _, m := range initial {
+		if err := limiter.UpgradeToward(ctx, m); err != nil {
+			return nil, false, err
+		}
+	}
+
+	mods = make([]module.Version, 0, len(limiter.selected))
+	for path, v := range limiter.selected {
+		if v != "none" && path != Target.Path {
+			mods = append(mods, module.Version{Path: path, Version: v})
+		}
+	}
+
+	// We've identified acceptable versions for each of the modules, but those
+	// versions are not necessarily consistent with each other: one upgraded or
+	// downgraded module may require a higher (but still allowed) version of
+	// another. The lower version may require extraneous dependencies that aren't
+	// actually relevant, so we need to compute the actual selected versions.
+	mg, err := readModGraph(ctx, rs.depth, mods)
+	if err != nil {
+		return nil, false, err
+	}
+	mods = make([]module.Version, 0, len(limiter.selected))
+	for path := range limiter.selected {
+		if path != Target.Path {
+			if v := mg.Selected(path); v != "none" {
+				mods = append(mods, module.Version{Path: path, Version: v})
+			}
+		}
+	}
+	module.Sort(mods)
+
+	changed = !reflect.DeepEqual(mods, initial)
+
+	return mods, changed, err
+}
+
+// A versionLimiter tracks the versions that may be selected for each module
+// subject to constraints on the maximum versions of transitive dependencies.
+type versionLimiter struct {
+	// depth is the depth at which the dependencies of the modules passed to
+	// Select and UpgradeToward are loaded.
+	depth modDepth
+
+	// max maps each module path to the maximum version that may be selected for
+	// that path.
+	//
+	// Paths with no entry are unrestricted, and we assume that they will not be
+	// promoted to root dependencies (so will not contribute dependencies if the
+	// main module is lazy).
+	max map[string]string
+
+	// selected maps each module path to a version of that path (if known) whose
+	// transitive dependencies do not violate any max version. The version kept
+	// is the highest one found during any call to UpgradeToward for the given
+	// module path.
+	//
+	// If a higher acceptable version is found during a call to UpgradeToward for
+	// some *other* module path, that does not update the selected version.
+	// Ignoring those versions keeps the downgrades computed for two modules
+	// together close to the individual downgrades that would be computed for each
+	// module in isolation. (The only way one module can affect another is if the
+	// final downgraded version of the one module explicitly requires a higher
+	// version of the other.)
+	//
+	// Version "none" of every module is always known not to violate any max
+	// version, so paths at version "none" are omitted.
+	selected map[string]string
+
+	// dqReason records whether and why each encountered version is
+	// disqualified.
+	dqReason map[module.Version]dqState
+
+	// requiring maps each not-yet-disqualified module version to the versions
+	// that directly require it. If that version becomes disqualified, the
+	// disqualification will be propagated to all of the versions in the list.
+	requiring map[module.Version][]module.Version
+}
+
+// A dqState indicates whether and why a module version is “disqualified” from
+// being used in a way that would incorporate its requirements.
+//
+// The zero dqState indicates that the module version is not known to be
+// disqualified, either because it is ok or because we are currently traversing
+// a cycle that includes it.
+type dqState struct {
+	err      error          // if non-nil, disqualified because the requirements of the module could not be read
+	conflict module.Version // disqualified because the module (transitively) requires dep, which exceeds the maximum version constraint for its path
+}
+
+func (dq dqState) isDisqualified() bool {
+	return dq != dqState{}
+}
+
+// newVersionLimiter returns a versionLimiter that restricts the module paths
+// that appear as keys in max.
+//
+// max maps each module path to its maximum version; paths that are not present
+// in the map are unrestricted. The limiter assumes that unrestricted paths will
+// not be promoted to root dependencies.
+//
+// If depth is lazy, then if a module passed to UpgradeToward or Select is
+// itself lazy, its unrestricted dependencies are skipped when scanning
+// requirements.
+func newVersionLimiter(depth modDepth, max map[string]string) *versionLimiter {
+	return &versionLimiter{
+		depth:     depth,
+		max:       max,
+		selected:  map[string]string{Target.Path: Target.Version},
+		dqReason:  map[module.Version]dqState{},
+		requiring: map[module.Version][]module.Version{},
+	}
+}
+
+// UpgradeToward attempts to upgrade the selected version of m.Path as close as
+// possible to m.Version without violating l's maximum version limits.
+//
+// If depth is lazy and m itself is lazy, the dependencies of unrestricted
+// dependencies of m will not be followed.
+func (l *versionLimiter) UpgradeToward(ctx context.Context, m module.Version) error {
+	selected, ok := l.selected[m.Path]
+	if ok {
+		if cmpVersion(selected, m.Version) >= 0 {
+			// The selected version is already at least m, so no upgrade is needed.
+			return nil
+		}
+	} else {
+		selected = "none"
+	}
+
+	if l.check(m, l.depth).isDisqualified() {
+		candidates, err := versions(ctx, m.Path, CheckAllowed)
+		if err != nil {
+			// This is likely a transient error reaching the repository,
+			// rather than a permanent error with the retrieved version.
+			//
+			// TODO(golang.org/issue/31730, golang.org/issue/30134):
+			// decide what to do based on the actual error.
+			return err
+		}
+
+		// Skip to candidates < m.Version.
+		i := sort.Search(len(candidates), func(i int) bool {
+			return semver.Compare(candidates[i], m.Version) >= 0
+		})
+		candidates = candidates[:i]
+
+		for l.check(m, l.depth).isDisqualified() {
+			n := len(candidates)
+			if n == 0 || cmpVersion(selected, candidates[n-1]) >= 0 {
+				// We couldn't find a suitable candidate above the already-selected version.
+				// Retain that version unmodified.
+				return nil
+			}
+			m.Version, candidates = candidates[n-1], candidates[:n-1]
+		}
+	}
+
+	l.selected[m.Path] = m.Version
+	return nil
+}
+
+// Select attempts to set the selected version of m.Path to exactly m.Version.
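The reverse-edge bookkeeping that check and disqualify implement below can be modeled with plain strings. In this toy version (the module names and requiring edges are invented), a requires b, b requires c, and disqualifying c propagates back through the requiring map exactly once per version:

	package main

	import "fmt"

	func main() {
		// a requires b, and b requires c; c will exceed its version cap.
		requiring := map[string][]string{
			"b@v1.0.0": {"a@v1.0.0"},
			"c@v1.0.0": {"b@v1.0.0"},
		}
		disqualified := map[string]string{} // version → the conflict that disqualified it

		var disqualify func(m, conflict string)
		disqualify = func(m, conflict string) {
			if _, dup := disqualified[m]; dup {
				return // already disqualified; nothing new to propagate
			}
			disqualified[m] = conflict
			for _, p := range requiring[m] {
				disqualify(p, m)
			}
			delete(requiring, m) // m's reverse edges are never needed again
		}

		disqualify("c@v1.0.0", "c@v1.0.0")
		fmt.Println(disqualified)
		// map[a@v1.0.0:b@v1.0.0 b@v1.0.0:c@v1.0.0 c@v1.0.0:c@v1.0.0]
	}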
+func (l *versionLimiter) Select(m module.Version) (conflict module.Version, err error) { + dq := l.check(m, l.depth) + if !dq.isDisqualified() { + l.selected[m.Path] = m.Version + } + return dq.conflict, dq.err +} + +// check determines whether m (or its transitive dependencies) would violate l's +// maximum version limits if added to the module requirement graph. +// +// If depth is lazy and m itself is lazy, then the dependencies of unrestricted +// dependencies of m will not be followed. If the lazy loading invariants hold +// for the main module up to this point, the packages in those modules are at +// best only imported by tests of dependencies that are themselves loaded from +// outside modules. Although we would like to keep 'go test all' as reproducible +// as is feasible, we don't want to retain test dependencies that are only +// marginally relevant at best. +func (l *versionLimiter) check(m module.Version, depth modDepth) dqState { + if m.Version == "none" || m == Target { + // version "none" has no requirements, and the dependencies of Target are + // tautological. + return dqState{} + } + + if dq, seen := l.dqReason[m]; seen { + return dq + } + l.dqReason[m] = dqState{} + + if max, ok := l.max[m.Path]; ok && cmpVersion(m.Version, max) > 0 { + return l.disqualify(m, dqState{conflict: m}) + } + + summary, err := goModSummary(m) + if err != nil { + // If we can't load the requirements, we couldn't load the go.mod file. + // There are a number of reasons this can happen, but this usually + // means an older version of the module had a missing or invalid + // go.mod file. For example, if example.com/mod released v2.0.0 before + // migrating to modules (v2.0.0+incompatible), then added a valid go.mod + // in v2.0.1, downgrading from v2.0.1 would cause this error. + // + // TODO(golang.org/issue/31730, golang.org/issue/30134): if the error + // is transient (we couldn't download go.mod), return the error from + // Downgrade. Currently, we can't tell what kind of error it is. + return l.disqualify(m, dqState{err: err}) + } + + if summary.depth == eager { + depth = eager + } + for _, r := range summary.require { + if depth == lazy { + if _, restricted := l.max[r.Path]; !restricted { + // r.Path is unrestricted, so we don't care at what version it is + // selected. We assume that r.Path will not become a root dependency, so + // since m is lazy, r's dependencies won't be followed. + continue + } + } + + if dq := l.check(r, depth); dq.isDisqualified() { + return l.disqualify(m, dq) + } + + // r and its dependencies are (perhaps provisionally) ok. + // + // However, if there are cycles in the requirement graph, we may have only + // checked a portion of the requirement graph so far, and r (and thus m) may + // yet be disqualified by some path we have not yet visited. Remember this edge + // so that we can disqualify m and its dependents if that occurs. + l.requiring[r] = append(l.requiring[r], m) + } + + return dqState{} +} + +// disqualify records that m (or one of its transitive dependencies) +// violates l's maximum version limits. +func (l *versionLimiter) disqualify(m module.Version, dq dqState) dqState { + if dq := l.dqReason[m]; dq.isDisqualified() { + return dq + } + l.dqReason[m] = dq + + for _, p := range l.requiring[m] { + l.disqualify(p, dqState{conflict: m}) + } + // Now that we have disqualified the modules that depend on m, we can forget + // about them — we won't need to disqualify them again. 
+ delete(l.requiring, m) + return dq +} diff --git a/src/cmd/go/internal/modload/help.go b/src/cmd/go/internal/modload/help.go index fd39ddd94ec..886ad62bd90 100644 --- a/src/cmd/go/internal/modload/help.go +++ b/src/cmd/go/internal/modload/help.go @@ -46,7 +46,7 @@ marking the root of the main (current) module. The go.mod file format is described in detail at https://golang.org/ref/mod#go-mod-file. -To create a new go.mod file, use 'go help init'. For details see +To create a new go.mod file, use 'go mod init'. For details see 'go help mod init' or https://golang.org/ref/mod#go-mod-init. To add missing module requirements or remove unneeded requirements, diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 182429aee41..f76befcfe35 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -12,6 +12,7 @@ import ( "internal/goroot" "io/fs" "os" + pathpkg "path" "path/filepath" "sort" "strings" @@ -51,7 +52,7 @@ func (e *ImportMissingError) Error() string { if e.isStd { return fmt.Sprintf("package %s is not in GOROOT (%s)", e.Path, filepath.Join(cfg.GOROOT, "src", e.Path)) } - if e.QueryErr != nil { + if e.QueryErr != nil && e.QueryErr != ErrNoModRoot { return fmt.Sprintf("cannot find module providing package %s: %v", e.Path, e.QueryErr) } if cfg.BuildMod == "mod" || (cfg.BuildMod == "readonly" && allowMissingModuleImports) { @@ -60,19 +61,17 @@ func (e *ImportMissingError) Error() string { if e.replaced.Path != "" { suggestArg := e.replaced.Path - if !modfetch.IsZeroPseudoVersion(e.replaced.Version) { + if !module.IsZeroPseudoVersion(e.replaced.Version) { suggestArg = e.replaced.String() } return fmt.Sprintf("module %s provides package %s and is replaced but not required; to add it:\n\tgo get %s", e.replaced.Path, e.Path, suggestArg) } - suggestion := "" - if !HasModRoot() { - suggestion = ": working directory is not part of a module" - } else { - suggestion = fmt.Sprintf("; to add it:\n\tgo get %s", e.Path) + message := fmt.Sprintf("no required module provides package %s", e.Path) + if e.QueryErr != nil { + return fmt.Sprintf("%s: %v", message, e.QueryErr) } - return fmt.Sprintf("no required module provides package %s%s", e.Path, suggestion) + return fmt.Sprintf("%s; to add it:\n\tgo get %s", message, e.Path) } if e.newMissingVersion != "" { @@ -129,6 +128,23 @@ func (e *AmbiguousImportError) Error() string { return buf.String() } +// A DirectImportFromImplicitDependencyError indicates a package directly +// imported by a package or test in the main module that is satisfied by a +// dependency that is not explicit in the main module's go.mod file. +type DirectImportFromImplicitDependencyError struct { + ImporterPath string + ImportedPath string + Module module.Version +} + +func (e *DirectImportFromImplicitDependencyError) Error() string { + return fmt.Sprintf("package %s imports %s from implicitly required module; to add missing requirements, run:\n\tgo get %s@%s", e.ImporterPath, e.ImportedPath, e.Module.Path, e.Module.Version) +} + +func (e *DirectImportFromImplicitDependencyError) ImportPath() string { + return e.ImporterPath +} + // ImportMissingSumError is reported in readonly mode when we need to check // if a module contains a package, but we don't have a sum for its .zip file. // We might need sums for multiple modules to verify the package is unique. 
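The rewritten importFromModules in the next hunk walks import-path prefixes instead of the whole build list. A stand-alone illustration of that walk, with a made-up import path:

	package main

	import (
		"fmt"
		pathpkg "path"
	)

	func main() {
		// The candidate module paths for an import are exactly the successive
		// prefixes of the import path, so the loop is O(path depth) rather than
		// O(number of selected modules).
		path := "example.com/a/b/c"
		for prefix := path; prefix != "."; prefix = pathpkg.Dir(prefix) {
			fmt.Println(prefix)
		}
		// Output:
		// example.com/a/b/c
		// example.com/a/b
		// example.com/a
		// example.com
	}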
@@ -204,28 +220,31 @@ func (e *invalidImportError) Unwrap() error { return e.err } -// importFromBuildList finds the module and directory in the build list -// containing the package with the given import path. The answer must be unique: -// importFromBuildList returns an error if multiple modules attempt to provide -// the same package. +// importFromModules finds the module and directory in the dependency graph of +// rs containing the package with the given import path. If mg is nil, +// importFromModules attempts to locate the module using only the main module +// and the roots of rs before it loads the full graph. // -// importFromBuildList can return a module with an empty m.Path, for packages in +// The answer must be unique: importFromModules returns an error if multiple +// modules are observed to provide the same package. +// +// importFromModules can return a module with an empty m.Path, for packages in // the standard library. // -// importFromBuildList can return an empty directory string, for fake packages +// importFromModules can return an empty directory string, for fake packages // like "C" and "unsafe". // -// If the package cannot be found in buildList, -// importFromBuildList returns an *ImportMissingError. -func importFromBuildList(ctx context.Context, path string, buildList []module.Version) (m module.Version, dir string, err error) { +// If the package is not present in any module selected from the requirement +// graph, importFromModules returns an *ImportMissingError. +func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph) (m module.Version, dir string, err error) { if strings.Contains(path, "@") { return module.Version{}, "", fmt.Errorf("import path should not have @version") } if build.IsLocalImport(path) { return module.Version{}, "", fmt.Errorf("relative import not supported") } - if path == "C" || path == "unsafe" { - // There's no directory for import "C" or import "unsafe". + if path == "C" { + // There's no directory for import "C". return module.Version{}, "", nil } // Before any further lookup, check that the path is valid. @@ -271,54 +290,114 @@ func importFromBuildList(ctx context.Context, path string, buildList []module.Ve // Check each module on the build list. var dirs []string var mods []module.Version - var sumErrMods []module.Version - for _, m := range buildList { - if !maybeInModule(path, m.Path) { - // Avoid possibly downloading irrelevant modules. - continue - } - needSum := true - root, isLocal, err := fetch(ctx, m, needSum) - if err != nil { - if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { - // We are missing a sum needed to fetch a module in the build list. - // We can't verify that the package is unique, and we may not find - // the package at all. Keep checking other modules to decide which - // error to report. Multiple sums may be missing if we need to look in - // multiple nested modules to resolve the import. - sumErrMods = append(sumErrMods, m) + + // Iterate over possible modules for the path, not all selected modules. + // Iterating over selected modules would make the overall loading time + // O(M × P) for M modules providing P imported packages, whereas iterating + // over path prefixes is only O(P × k) with maximum path depth k. For + // large projects both M and P may be very large (note that M ≤ P), but k + // will tend to remain smallish (if for no other reason than filesystem + // path limitations). + // + // We perform this iteration either one or two times. 
If mg is initially nil, + // then we first attempt to load the package using only the main module and + // its root requirements. If that does not identify the package, or if mg is + // already non-nil, then we attempt to load the package using the full + // requirements in mg. + for { + var sumErrMods []module.Version + for prefix := path; prefix != "."; prefix = pathpkg.Dir(prefix) { + var ( + v string + ok bool + ) + if mg == nil { + v, ok = rs.rootSelected(prefix) + } else { + v, ok = mg.Selected(prefix), true + } + if !ok || v == "none" { continue } - // Report fetch error. - // Note that we don't know for sure this module is necessary, - // but it certainly _could_ provide the package, and even if we - // continue the loop and find the package in some other module, - // we need to look at this module to make sure the import is - // not ambiguous. - return module.Version{}, "", err - } - if dir, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil { - return module.Version{}, "", err - } else if ok { - mods = append(mods, m) - dirs = append(dirs, dir) - } - } - if len(mods) > 1 { - return module.Version{}, "", &AmbiguousImportError{importPath: path, Dirs: dirs, Modules: mods} - } - if len(sumErrMods) > 0 { - return module.Version{}, "", &ImportMissingSumError{ - importPath: path, - mods: sumErrMods, - found: len(mods) > 0, - } - } - if len(mods) == 1 { - return mods[0], dirs[0], nil - } + m := module.Version{Path: prefix, Version: v} - return module.Version{}, "", &ImportMissingError{Path: path, isStd: pathIsStd} + needSum := true + root, isLocal, err := fetch(ctx, m, needSum) + if err != nil { + if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + // We are missing a sum needed to fetch a module in the build list. + // We can't verify that the package is unique, and we may not find + // the package at all. Keep checking other modules to decide which + // error to report. Multiple sums may be missing if we need to look in + // multiple nested modules to resolve the import; we'll report them all. + sumErrMods = append(sumErrMods, m) + continue + } + // Report fetch error. + // Note that we don't know for sure this module is necessary, + // but it certainly _could_ provide the package, and even if we + // continue the loop and find the package in some other module, + // we need to look at this module to make sure the import is + // not ambiguous. + return module.Version{}, "", err + } + if dir, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil { + return module.Version{}, "", err + } else if ok { + mods = append(mods, m) + dirs = append(dirs, dir) + } + } + + if len(mods) > 1 { + // We produce the list of directories from longest to shortest candidate + // module path, but the AmbiguousImportError should report them from + // shortest to longest. Reverse them now. 
+			for i := 0; i < len(mods)/2; i++ {
+				j := len(mods) - 1 - i
+				mods[i], mods[j] = mods[j], mods[i]
+				dirs[i], dirs[j] = dirs[j], dirs[i]
+			}
+			return module.Version{}, "", &AmbiguousImportError{importPath: path, Dirs: dirs, Modules: mods}
+		}
+
+		if len(sumErrMods) > 0 {
+			for i := 0; i < len(sumErrMods)/2; i++ {
+				j := len(sumErrMods) - 1 - i
+				sumErrMods[i], sumErrMods[j] = sumErrMods[j], sumErrMods[i]
+			}
+			return module.Version{}, "", &ImportMissingSumError{
+				importPath: path,
+				mods:       sumErrMods,
+				found:      len(mods) > 0,
+			}
+		}
+
+		if len(mods) == 1 {
+			return mods[0], dirs[0], nil
+		}
+
+		if mg != nil {
+			// We checked the full module graph and still didn't find the
+			// requested package.
+			var queryErr error
+			if !HasModRoot() {
+				queryErr = ErrNoModRoot
+			}
+			return module.Version{}, "", &ImportMissingError{Path: path, QueryErr: queryErr, isStd: pathIsStd}
+		}
+
+		// So far we've checked the root dependencies.
+		// Load the full module graph and try again.
+		mg, err = rs.Graph(ctx)
+		if err != nil {
+			// We might be missing one or more transitive (implicit) dependencies from
+			// the module graph, so we can't return an ImportMissingError here — one
+			// of the missing modules might actually contain the package in question,
+			// in which case we shouldn't go looking for it in some new dependency.
+			return module.Version{}, "", err
+		}
+	}
 }
 
 // queryImport attempts to locate a module that can be added to the current
@@ -326,7 +405,7 @@ func importFromBuildList(ctx context.Context, path string, buildList []module.Ve
 //
 // Unlike QueryPattern, queryImport prefers to add a replaced version of a
 // module *before* checking the proxies for a version to add.
-func queryImport(ctx context.Context, path string) (module.Version, error) {
+func queryImport(ctx context.Context, path string, rs *Requirements) (module.Version, error) {
 	// To avoid spurious remote fetches, try the latest replacement for each
 	// module (golang.org/issue/26241).
 	if index != nil {
@@ -342,9 +421,9 @@ func queryImport(ctx context.Context, path string) (module.Version, error) {
 		// used from within some other module, the user will be able to upgrade
 		// the requirement to any real version they choose.
 		if _, pathMajor, ok := module.SplitPathVersion(mp); ok && len(pathMajor) > 0 {
-			mv = modfetch.ZeroPseudoVersion(pathMajor[1:])
+			mv = module.ZeroPseudoVersion(pathMajor[1:])
 		} else {
-			mv = modfetch.ZeroPseudoVersion("v0")
+			mv = module.ZeroPseudoVersion("v0")
 		}
 	}
 	mods = append(mods, module.Version{Path: mp, Version: mv})
@@ -415,7 +494,12 @@ func queryImport(ctx context.Context, path string) (module.Version, error) {
 	// and return m, dir, ImportMissingError.
fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) - candidates, err := QueryPackages(ctx, path, "latest", Selected, CheckAllowed) + mg, err := rs.Graph(ctx) + if err != nil { + return module.Version{}, err + } + + candidates, err := QueryPackages(ctx, path, "latest", mg.Selected, CheckAllowed) if err != nil { if errors.Is(err, fs.ErrNotExist) { // Return "cannot find module providing package […]" instead of whatever @@ -428,28 +512,21 @@ func queryImport(ctx context.Context, path string) (module.Version, error) { candidate0MissingVersion := "" for i, c := range candidates { - cm := c.Mod - canAdd := true - for _, bm := range buildList { - if bm.Path == cm.Path && semver.Compare(bm.Version, cm.Version) > 0 { - // QueryPattern proposed that we add module cm to provide the package, - // but we already depend on a newer version of that module (and we don't - // have the package). - // - // This typically happens when a package is present at the "@latest" - // version (e.g., v1.0.0) of a module, but we have a newer version - // of the same module in the build list (e.g., v1.0.1-beta), and - // the package is not present there. - canAdd = false - if i == 0 { - candidate0MissingVersion = bm.Version - } - break + if v := mg.Selected(c.Mod.Path); semver.Compare(v, c.Mod.Version) > 0 { + // QueryPattern proposed that we add module c.Mod to provide the package, + // but we already depend on a newer version of that module (and that + // version doesn't have the package). + // + // This typically happens when a package is present at the "@latest" + // version (e.g., v1.0.0) of a module, but we have a newer version + // of the same module in the build list (e.g., v1.0.1-beta), and + // the package is not present there. + if i == 0 { + candidate0MissingVersion = v } + continue } - if canAdd { - return cm, nil - } + return c.Mod, nil } return module.Version{}, &ImportMissingError{ Path: path, diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go index 9420dc56460..98145887e9d 100644 --- a/src/cmd/go/internal/modload/import_test.go +++ b/src/cmd/go/internal/modload/import_test.go @@ -69,11 +69,12 @@ func TestQueryImport(t *testing.T) { RootMode = NoRoot ctx := context.Background() + rs := newRequirements(eager, nil, nil) for _, tt := range importTests { t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) { // Note that there is no build list, so Import should always fail. - m, err := queryImport(ctx, tt.path) + m, err := queryImport(ctx, tt.path, rs) if tt.err == "" { if err != nil { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index bc8d17e0a5f..5cdea12cd3e 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -15,10 +15,8 @@ import ( "os" "path" "path/filepath" - "sort" "strconv" "strings" - "sync" "cmd/go/internal/base" "cmd/go/internal/cfg" @@ -26,20 +24,37 @@ import ( "cmd/go/internal/lockedfile" "cmd/go/internal/modconv" "cmd/go/internal/modfetch" - "cmd/go/internal/mvs" "cmd/go/internal/search" - "cmd/go/internal/str" "golang.org/x/mod/modfile" "golang.org/x/mod/module" "golang.org/x/mod/semver" ) +// Variables set by other packages. +// +// TODO(#40775): See if these can be plumbed as explicit parameters. +var ( + // RootMode determines whether a module root is needed. + RootMode Root + + // ForceUseModules may be set to force modules to be enabled when + // GO111MODULE=auto or to report an error when GO111MODULE=off. 
+ ForceUseModules bool + + allowMissingModuleImports bool +) + +// Variables set in Init. var ( initialized bool + modRoot string + gopath string +) - modRoot string - Target module.Version +// Variables set in initTarget (during {Load,Create}ModFile). +var ( + Target module.Version // targetPrefix is the path prefix for packages in Target, without a trailing // slash. For most modules, targetPrefix is just Target.Path, but the @@ -49,17 +64,6 @@ var ( // targetInGorootSrc caches whether modRoot is within GOROOT/src. // The "std" module is special within GOROOT/src, but not otherwise. targetInGorootSrc bool - - gopath string - - // RootMode determines whether a module root is needed. - RootMode Root - - // ForceUseModules may be set to force modules to be enabled when - // GO111MODULE=auto or to report an error when GO111MODULE=off. - ForceUseModules bool - - allowMissingModuleImports bool ) type Root int @@ -82,7 +86,7 @@ const ( // ModFile returns the parsed go.mod file. // -// Note that after calling LoadPackages or LoadAllModules, +// Note that after calling LoadPackages or LoadModGraph, // the require statements in the modfile.File are no longer // the source of truth and will be ignored: edits made directly // will be lost at the next call to WriteGoMod. @@ -131,7 +135,7 @@ func Init() { return } - if err := fsys.Init(base.Cwd); err != nil { + if err := fsys.Init(base.Cwd()); err != nil { base.Fatalf("go: %v", err) } @@ -159,7 +163,11 @@ func Init() { // assume they know what they are doing and don't step on it. // But default to turning off ControlMaster. if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" { - os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no") + os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no -o BatchMode=yes") + } + + if os.Getenv("GCM_INTERACTIVE") == "" { + os.Setenv("GCM_INTERACTIVE", "never") } if modRoot != "" { @@ -171,13 +179,13 @@ func Init() { } modRoot = "" } else { - modRoot = findModuleRoot(base.Cwd) + modRoot = findModuleRoot(base.Cwd()) if modRoot == "" { if cfg.ModFile != "" { base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.") } if RootMode == NeedRoot { - base.Fatalf("go: cannot find main module; see 'go help modules'") + base.Fatalf("go: %v", ErrNoModRoot) } if !mustUseModules { // GO111MODULE is 'auto', and we can't find a module root. @@ -268,7 +276,7 @@ func WillBeEnabled() bool { return false } - if modRoot := findModuleRoot(base.Cwd); modRoot == "" { + if modRoot := findModuleRoot(base.Cwd()); modRoot == "" { // GO111MODULE is 'auto', and we can't find a module root. // Stay in GOPATH mode. 
return false @@ -327,8 +335,8 @@ func die() { if cfg.Getenv("GO111MODULE") == "off" { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } - if dir, name := findAltConfig(base.Cwd); dir != "" { - rel, err := filepath.Rel(base.Cwd, dir) + if dir, name := findAltConfig(base.Cwd()); dir != "" { + rel, err := filepath.Rel(base.Cwd(), dir) if err != nil { rel = dir } @@ -338,29 +346,69 @@ func die() { } base.Fatalf("go: cannot find main module, but found %s in %s\n\tto create a module there, run:\n\t%sgo mod init", name, dir, cdCmd) } - base.Fatalf("go: cannot find main module; see 'go help modules'") + base.Fatalf("go: %v", ErrNoModRoot) } +var ErrNoModRoot = errors.New("go.mod file not found in current directory or any parent directory; see 'go help modules'") + +type goModDirtyError struct{} + +func (goModDirtyError) Error() string { + if cfg.BuildModExplicit { + return fmt.Sprintf("updates to go.mod needed, disabled by -mod=%v; to update it:\n\tgo mod tidy", cfg.BuildMod) + } + if cfg.BuildModReason != "" { + return fmt.Sprintf("updates to go.mod needed, disabled by -mod=%s\n\t(%s)\n\tto update it:\n\tgo mod tidy", cfg.BuildMod, cfg.BuildModReason) + } + return "updates to go.mod needed; to update it:\n\tgo mod tidy" +} + +var errGoModDirty error = goModDirtyError{} + // LoadModFile sets Target and, if there is a main module, parses the initial // build list from its go.mod file. // // LoadModFile may make changes in memory, like adding a go directive and -// ensuring requirements are consistent. WriteGoMod should be called later to -// write changes out to disk or report errors in readonly mode. +// ensuring requirements are consistent, and will write those changes back to +// disk unless DisallowWriteGoMod is in effect. // // As a side-effect, LoadModFile may change cfg.BuildMod to "vendor" if // -mod wasn't set explicitly and automatic vendoring should be enabled. -func LoadModFile(ctx context.Context) { - if len(buildList) > 0 { - return +// +// If LoadModFile or CreateModFile has already been called, LoadModFile returns +// the existing in-memory requirements (rather than re-reading them from disk). +// +// LoadModFile checks the roots of the module graph for consistency with each +// other, but unlike LoadModGraph does not load the full module graph or check +// it for global consistency. Most callers outside of the modload package should +// use LoadModGraph instead. +func LoadModFile(ctx context.Context) *Requirements { + rs, needCommit := loadModFile(ctx) + if needCommit { + commitRequirements(ctx, modFileGoVersion(), rs) + } + return rs +} + +// loadModFile is like LoadModFile, but does not implicitly commit the +// requirements back to disk after fixing inconsistencies. +// +// If needCommit is true, after the caller makes any other needed changes to the +// returned requirements they should invoke commitRequirements to fix any +// inconsistencies that may be present in the on-disk go.mod file. 
+func loadModFile(ctx context.Context) (rs *Requirements, needCommit bool) {
+	if requirements != nil {
+		return requirements, false
 	}
 
 	Init()
 	if modRoot == "" {
 		Target = module.Version{Path: "command-line-arguments"}
 		targetPrefix = "command-line-arguments"
-		buildList = []module.Version{Target}
-		return
+		goVersion := latestGoVersion()
+		rawGoVersion.Store(Target, goVersion)
+		requirements = newRequirements(modDepthFromGoVersion(goVersion), nil, nil)
+		return requirements, false
 	}
 
 	gomod := ModFilePath()
@@ -375,24 +423,50 @@ func LoadModFile(ctx context.Context) {
 		// Errors returned by modfile.Parse begin with file:line.
 		base.Fatalf("go: errors parsing go.mod:\n%s\n", err)
 	}
-	modFile = f
-	index = indexModFile(data, f, fixed)
-
 	if f.Module == nil {
 		// No module declaration. Must add module path.
 		base.Fatalf("go: no module declaration in go.mod. To specify the module path:\n\tgo mod edit -module=example.com/mod")
 	}
 
+	modFile = f
+	initTarget(f.Module.Mod)
+	index = indexModFile(data, f, fixed)
+
 	if err := checkModulePathLax(f.Module.Mod.Path); err != nil {
 		base.Fatalf("go: %v", err)
 	}
 
 	setDefaultBuildMod() // possibly enable automatic vendoring
-	modFileToBuildList()
+	rs = requirementsFromModFile(ctx)
+
 	if cfg.BuildMod == "vendor" {
 		readVendorList()
 		checkVendorConsistency()
+		rs.initVendor(vendorList)
 	}
+	if index.goVersionV == "" {
+		// TODO(#45551): Do something more principled instead of checking
+		// cfg.CmdName directly here.
+		if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" {
+			addGoStmt(latestGoVersion())
+			if go117EnableLazyLoading {
+				// We need to add a 'go' version to the go.mod file, but we must assume
+				// that its existing contents match something between Go 1.11 and 1.16.
+				// Go 1.11 through 1.16 have eager requirements, but the latest Go
+				// version uses lazy requirements instead — so we need to convert the
+				// requirements to be lazy.
+				rs, err = convertDepth(ctx, rs, lazy)
+				if err != nil {
+					base.Fatalf("go: %v", err)
+				}
+			}
+		} else {
+			rawGoVersion.Store(Target, modFileGoVersion())
+		}
+	}
+
+	requirements = rs
+	return requirements, true
 }
 
 // CreateModFile initializes a new module by creating a go.mod file.
@@ -405,7 +479,7 @@ func LoadModFile(ctx context.Context) {
 // exactly the same as in the legacy configuration (for example, we can't get
 // packages at multiple versions from the same module).
 func CreateModFile(ctx context.Context, modPath string) {
-	modRoot = base.Cwd
+	modRoot = base.Cwd()
 	Init()
 	modFilePath := ModFilePath()
 	if _, err := fsys.Stat(modFilePath); err == nil {
@@ -425,7 +499,8 @@ func CreateModFile(ctx context.Context, modPath string) {
 	fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath)
 	modFile = new(modfile.File)
 	modFile.AddModuleStmt(modPath)
-	addGoStmt() // Add the go directive before converted module requirements.
+	initTarget(modFile.Module.Mod)
+	addGoStmt(latestGoVersion()) // Add the go directive before converted module requirements.
 
 	convertedFrom, err := convertLegacyConfig(modPath)
 	if convertedFrom != "" {
@@ -435,8 +510,7 @@ func CreateModFile(ctx context.Context, modPath string) {
 		base.Fatalf("go: %v", err)
 	}
 
-	modFileToBuildList()
-	WriteGoMod()
+	commitRequirements(ctx, modFileGoVersion(), requirementsFromModFile(ctx))
 
 	// Suggest running 'go mod tidy' unless the project is empty. Even if we
 	// imported all the correct requirements above, we're probably missing
@@ -481,7 +555,7 @@ func checkModulePathLax(p string) error {
 	// with file systems and subcommands. Disallow file path separators : and \
 	// because path separators other than / will confuse the module cache.
 	// See fileNameOK in golang.org/x/mod/module/module.go.
-	shellChars := "`" + `\"'*<>?|`
+	shellChars := "`" + `"'*<>?|`
 	fsChars := `\:`
 	if i := strings.IndexAny(p, shellChars); i >= 0 {
 		return errorf("contains disallowed shell character %q", p[i])
@@ -539,9 +613,10 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer {
 		}
 	}
 	if vers != "" && module.CanonicalVersion(vers) == vers {
-		if err := module.CheckPathMajor(vers, pathMajor); err == nil {
-			return vers, nil
+		if err := module.CheckPathMajor(vers, pathMajor); err != nil {
+			return "", module.VersionError(module.Version{Path: path, Version: vers}, err)
 		}
+		return vers, nil
 	}
 
 	info, err := Query(ctx, path, vers, "", nil)
@@ -556,22 +631,43 @@
 // when there is no module root. Normally, this is forbidden because it's slow
 // and there's no way to make the result reproducible, but some commands
 // like 'go get' are expected to do this.
+//
+// This function affects the default cfg.BuildMod when outside of a module,
+// so it can only be called prior to Init.
 func AllowMissingModuleImports() {
+	if initialized {
+		panic("AllowMissingModuleImports after Init")
+	}
 	allowMissingModuleImports = true
 }
 
-// modFileToBuildList initializes buildList from the modFile.
-func modFileToBuildList() {
-	Target = modFile.Module.Mod
-	targetPrefix = Target.Path
-	if rel := search.InDir(base.Cwd, cfg.GOROOTsrc); rel != "" {
+// initTarget sets Target and associated variables according to modFile.
+func initTarget(m module.Version) {
+	Target = m
+	targetPrefix = m.Path
+
+	if rel := search.InDir(base.Cwd(), cfg.GOROOTsrc); rel != "" {
 		targetInGorootSrc = true
-		if Target.Path == "std" {
+		if m.Path == "std" {
+			// The "std" module in GOROOT/src is the Go standard library. Unlike other
+			// modules, the packages in the "std" module have no import-path prefix.
+			//
+			// Modules named "std" outside of GOROOT/src do not receive this special
+			// treatment, so it is possible to run 'go test .' in other GOROOTs to
+			// test individual packages using a combination of the modified package
			// and the ordinary standard library.
+			// (See https://golang.org/issue/30756.)
 			targetPrefix = ""
 		}
 	}
+}
 
-	list := []module.Version{Target}
+// requirementsFromModFile returns the set of non-excluded requirements from
+// the global modFile.
+func requirementsFromModFile(ctx context.Context) *Requirements {
+	roots := make([]module.Version, 0, len(modFile.Require))
+	mPathCount := map[string]int{Target.Path: 1}
+	direct := map[string]bool{}
 	for _, r := range modFile.Require {
 		if index != nil && index.exclude[r.Mod] {
 			if cfg.BuildMod == "mod" {
@@ -579,11 +675,33 @@
 			} else {
 				fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
 			}
-		} else {
-			list = append(list, r.Mod)
+			continue
+		}
+
+		roots = append(roots, r.Mod)
+		mPathCount[r.Mod.Path]++
+		if !r.Indirect {
+			direct[r.Mod.Path] = true
 		}
 	}
-	buildList = list
+	module.Sort(roots)
+	rs := newRequirements(modDepthFromGoVersion(modFileGoVersion()), roots, direct)
+
+	// If any module path appears more than once in the roots, we know that the
+	// go.mod file needs to be updated even though we have not yet loaded any
+	// transitive dependencies.
+ for _, n := range mPathCount { + if n > 1 { + var err error + rs, err = updateRoots(ctx, rs.direct, rs, nil, nil) + if err != nil { + base.Fatalf("go: %v", err) + } + break + } + } + + return rs } // setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag @@ -602,7 +720,11 @@ func setDefaultBuildMod() { return } if modRoot == "" { - cfg.BuildMod = "readonly" + if allowMissingModuleImports { + cfg.BuildMod = "mod" + } else { + cfg.BuildMod = "readonly" + } return } @@ -631,6 +753,17 @@ func setDefaultBuildMod() { // convertLegacyConfig imports module requirements from a legacy vendoring // configuration file, if one is present. func convertLegacyConfig(modPath string) (from string, err error) { + noneSelected := func(path string) (version string) { return "none" } + queryPackage := func(path, rev string) (module.Version, error) { + pkgMods, modOnly, err := QueryPattern(context.Background(), path, rev, noneSelected, nil) + if err != nil { + return module.Version{}, err + } + if len(pkgMods) > 0 { + return pkgMods[0].Mod, nil + } + return modOnly.Mod, nil + } for _, name := range altConfigs { cfg := filepath.Join(modRoot, name) data, err := os.ReadFile(cfg) @@ -640,27 +773,35 @@ func convertLegacyConfig(modPath string) (from string, err error) { return "", nil } cfg = filepath.ToSlash(cfg) - err := modconv.ConvertLegacyConfig(modFile, cfg, data) + err := modconv.ConvertLegacyConfig(modFile, cfg, data, queryPackage) return name, err } } return "", nil } -// addGoStmt adds a go directive to the go.mod file if it does not already include one. -// The 'go' version added, if any, is the latest version supported by this toolchain. -func addGoStmt() { +// addGoStmt adds a go directive to the go.mod file if it does not already +// include one. The 'go' version added, if any, is the latest version supported +// by this toolchain. +func addGoStmt(v string) { if modFile.Go != nil && modFile.Go.Version != "" { return } + if err := modFile.AddGoStmt(v); err != nil { + base.Fatalf("go: internal error: %v", err) + } + rawGoVersion.Store(Target, v) +} + +// latestGoVersion returns the latest version of the Go language supported by +// this toolchain, like "1.17". +func latestGoVersion() string { tags := build.Default.ReleaseTags version := tags[len(tags)-1] if !strings.HasPrefix(version, "go") || !modfile.GoVersionRE.MatchString(version[2:]) { base.Fatalf("go: unrecognized default version %q", version) } - if err := modFile.AddGoStmt(version[2:]); err != nil { - base.Fatalf("go: internal error: %v", err) - } + return version[2:] } var altConfigs = []string{ @@ -844,71 +985,47 @@ func AllowWriteGoMod() { allowWriteGoMod = true } -// MinReqs returns a Reqs with minimal additional dependencies of Target, -// as will be written to go.mod. -func MinReqs() mvs.Reqs { - retain := append([]string{}, additionalExplicitRequirements...) - for _, m := range buildList[1:] { - _, explicit := index.require[m] - if explicit || loaded.direct[m.Path] { - retain = append(retain, m.Path) - } +// WriteGoMod writes the current build list back to go.mod. 
+func WriteGoMod(ctx context.Context) {
+	if !allowWriteGoMod {
+		panic("WriteGoMod called while disallowed")
 	}
-	sort.Strings(retain)
-	str.Uniq(&retain)
-	min, err := mvs.Req(Target, retain, &mvsReqs{buildList: buildList})
-	if err != nil {
-		base.Fatalf("go: %v", err)
-	}
-	return &mvsReqs{buildList: append([]module.Version{Target}, min...)}
+	commitRequirements(ctx, modFileGoVersion(), LoadModFile(ctx))
 }
 
-// WriteGoMod writes the current build list back to go.mod.
-func WriteGoMod() {
-	// If we're using -mod=vendor we basically ignored
-	// go.mod, so definitely don't try to write back our
-	// incomplete view of the world.
-	if !allowWriteGoMod || cfg.BuildMod == "vendor" {
+// commitRequirements sets the global requirements variable to rs and
+// writes its contents back to the go.mod file on disk.
+func commitRequirements(ctx context.Context, goVersion string, rs *Requirements) {
+	requirements = rs
+
+	if !allowWriteGoMod {
+		// Some package outside of modload promised to update the go.mod file later.
 		return
 	}
 
-	// If we aren't in a module, we don't have anywhere to write a go.mod file.
 	if modRoot == "" {
+		// We aren't in a module, so we don't have anywhere to write a go.mod file.
 		return
 	}
 
-	if cfg.BuildMod != "readonly" {
-		addGoStmt()
+	var list []*modfile.Require
+	for _, m := range rs.rootModules {
+		list = append(list, &modfile.Require{
+			Mod:      m,
+			Indirect: !rs.direct[m.Path],
+		})
 	}
-
-	if loaded != nil {
-		reqs := MinReqs()
-		min, err := reqs.Required(Target)
-		if err != nil {
-			base.Fatalf("go: %v", err)
-		}
-		var list []*modfile.Require
-		for _, m := range min {
-			list = append(list, &modfile.Require{
-				Mod:      m,
-				Indirect: !loaded.direct[m.Path],
-			})
-		}
-		modFile.SetRequire(list)
+	modFile.SetRequire(list)
+	if goVersion != "" {
+		modFile.AddGoStmt(goVersion)
 	}
 	modFile.Cleanup()
 
 	dirty := index.modFileIsDirty(modFile)
-	if dirty && cfg.BuildMod == "readonly" {
+	if dirty && cfg.BuildMod != "mod" {
 		// If we're about to fail due to -mod=readonly,
 		// prefer to report a dirty go.mod over a dirty go.sum
-		if cfg.BuildModExplicit {
-			base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly")
-		} else if cfg.BuildModReason != "" {
-			base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly\n\t(%s)", cfg.BuildModReason)
-		} else {
-			base.Fatalf("go: updates to go.mod needed; to update it:\n\tgo mod tidy")
-		}
+		base.Fatalf("go: %v", errGoModDirty)
 	}
 
 	if !dirty && cfg.CmdName != "mod tidy" {
@@ -917,7 +1034,7 @@ func WriteGoMod() {
 		// Don't write go.mod, but write go.sum in case we added or trimmed sums.
 		// 'go mod init' shouldn't write go.sum, since it will be incomplete.
 		if cfg.CmdName != "mod init" {
-			modfetch.WriteGoSum(keepSums(true))
+			modfetch.WriteGoSum(keepSums(ctx, loaded, rs, addBuildListZipSums))
 		}
 		return
 	}
@@ -933,7 +1050,7 @@ func WriteGoMod() {
 		// Update go.sum after releasing the side lock and refreshing the index.
 		// 'go mod init' shouldn't write go.sum, since it will be incomplete.
 		if cfg.CmdName != "mod init" {
-			modfetch.WriteGoSum(keepSums(true))
+			modfetch.WriteGoSum(keepSums(ctx, loaded, rs, addBuildListZipSums))
 		}
 	}()
@@ -970,100 +1087,100 @@
 	}
 }
 
-// keepSums returns a set of module sums to preserve in go.sum. The set
-// includes entries for all modules used to load packages (according to
-// the last load function such as LoadPackages or ImportFromFiles).
-// It also contains entries for go.mod files needed for MVS (the version
-// of these entries ends with "/go.mod").
-//
-// If keepBuildListZips is true, the set also includes sums for zip files for
-// all modules in the build list with replacements applied. 'go get' and
-// 'go mod download' may add sums to this set when adding a requirement on a
-// module without a root package or when downloading a direct or indirect
-// dependency.
-func keepSums(keepBuildListZips bool) map[module.Version]bool {
-	// Re-derive the build list using the current list of direct requirements.
-	// Keep the sum for the go.mod of each visited module version (or its
-	// replacement).
-	modkey := func(m module.Version) module.Version {
-		return module.Version{Path: m.Path, Version: m.Version + "/go.mod"}
-	}
+// keepSums returns the set of modules (and go.mod file entries) for which
+// checksums would be needed in order to reload the same set of packages
+// loaded by the most recent call to LoadPackages or ImportFromFiles,
+// including any go.mod files needed to reconstruct the MVS result.
+// If which is addBuildListZipSums, the set also includes the checksums
+// for the module zip files in the build list.
+func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool {
+	// Every module in the full module graph contributes its requirements,
+	// so in order to ensure that the build list itself is reproducible,
+	// we need sums for every go.mod in the graph (regardless of whether
+	// that version is selected).
 	keep := make(map[module.Version]bool)
-	var mu sync.Mutex
-	reqs := &keepSumReqs{
-		Reqs: &mvsReqs{buildList: buildList},
-		visit: func(m module.Version) {
-			// If we build using a replacement module, keep the sum for the replacement,
-			// since that's the code we'll actually use during a build.
-			mu.Lock()
-			r := Replacement(m)
-			if r.Path == "" {
-				keep[modkey(m)] = true
-			} else {
-				keep[modkey(r)] = true
-			}
-			mu.Unlock()
-		},
-	}
-	buildList, err := mvs.BuildList(Target, reqs)
-	if err != nil {
-		panic(fmt.Sprintf("unexpected error reloading build list: %v", err))
-	}
-
-	actualMods := make(map[string]module.Version)
-	for _, m := range buildList[1:] {
-		if r := Replacement(m); r.Path != "" {
-			actualMods[m.Path] = r
-		} else {
-			actualMods[m.Path] = m
-		}
-	}
 
 	// Add entries for modules in the build list with paths that are prefixes of
-	// paths of loaded packages. We need to retain sums for modules needed to
-	// report ambiguous import errors. We use our re-derived build list,
-	// since the global build list may have been tidied.
-	if loaded != nil {
-		for _, pkg := range loaded.pkgs {
-			if pkg.testOf != nil || pkg.inStd || module.CheckImportPath(pkg.path) != nil {
+	// paths of loaded packages. We need to retain sums for all of these modules —
+	// not just the modules containing the actual packages — in order to rule out
+	// ambiguous import errors the next time we load the package.
+	if ld != nil {
+		for _, pkg := range ld.pkgs {
+			// We check pkg.mod.Path here instead of pkg.inStd because the
+			// pseudo-package "C" is not in std but is also not provided by any module
+			// (and shouldn't force loading the whole module graph).
+			if pkg.testOf != nil || (pkg.mod.Path == "" && pkg.err == nil) || module.CheckImportPath(pkg.path) != nil {
 				continue
 			}
+
+			if rs.depth == lazy && pkg.mod.Path != "" {
+				if v, ok := rs.rootSelected(pkg.mod.Path); ok && v == pkg.mod.Version {
+					// pkg was loaded from a root module, and because the main module is
+					// lazy we do not check non-root modules for conflicts for packages
+					// that can be found in roots. So we only need the checksums for the
+					// root modules that may contain pkg, not all possible modules.
+					for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) {
+						if v, ok := rs.rootSelected(prefix); ok && v != "none" {
+							m := module.Version{Path: prefix, Version: v}
+							keep[resolveReplacement(m)] = true
+						}
+					}
+					continue
+				}
+			}
+
+			mg, _ := rs.Graph(ctx)
 			for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) {
-				if m, ok := actualMods[prefix]; ok {
-					keep[m] = true
+				if v := mg.Selected(prefix); v != "none" {
+					m := module.Version{Path: prefix, Version: v}
+					keep[resolveReplacement(m)] = true
 				}
 			}
 		}
 	}
 
-	// Add entries for the zip of each module in the build list.
-	// We might not need all of these (tidy does not add them), but they may be
-	// added by a specific 'go get' or 'go mod download' command to resolve
-	// missing import sum errors.
-	if keepBuildListZips {
-		for _, m := range actualMods {
-			keep[m] = true
+	if rs.depth == lazy && rs.graph.Load() == nil {
+		// The main module is lazy and we haven't needed to load the module graph so
+		// far. Don't incur the cost of loading it now — since we haven't loaded the
+		// graph, we probably don't have any checksums to contribute to the distant
+		// parts of the graph anyway. Instead, just request sums for the roots that
+		// we know about.
+		for _, m := range rs.rootModules {
+			r := resolveReplacement(m)
+			keep[modkey(r)] = true
+			if which == addBuildListZipSums {
+				keep[r] = true
+			}
+		}
+	} else {
+		mg, _ := rs.Graph(ctx)
+		mg.WalkBreadthFirst(func(m module.Version) {
+			if _, ok := mg.RequiredBy(m); ok {
+				// The requirements from m's go.mod file are present in the module graph,
+				// so they are relevant to the MVS result regardless of whether m was
+				// actually selected.
+				keep[modkey(resolveReplacement(m))] = true
+			}
+		})
+
+		if which == addBuildListZipSums {
+			for _, m := range mg.BuildList() {
+				keep[resolveReplacement(m)] = true
+			}
		}
 	}
 
 	return keep
 }
 
-// keepSumReqs embeds another Reqs implementation. The Required method
-// calls visit for each version in the module graph.
-type keepSumReqs struct {
-	mvs.Reqs
-	visit func(module.Version)
-}
+type whichSums int8
 
-func (r *keepSumReqs) Required(m module.Version) ([]module.Version, error) {
-	r.visit(m)
-	return r.Reqs.Required(m)
-}
+const (
+	loadedZipSumsOnly = whichSums(iota)
+	addBuildListZipSums
+)
 
-func TrimGoSum() {
-	// Don't retain sums for the zip file of every module in the build list.
-	// We may not need them all to build the main module's packages.
-	keepBuildListZips := false
-	modfetch.TrimGoSum(keepSums(keepBuildListZips))
+// modkey returns the module.Version under which the checksum for m's go.mod
+// file is stored in the go.sum file.
+func modkey(m module.Version) module.Version { + return module.Version{Path: m.Path, Version: m.Version + "/go.mod"} } diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 3491f941cd3..ccdeb9b1d11 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -20,25 +20,42 @@ import ( "golang.org/x/mod/module" ) -func ListModules(ctx context.Context, args []string, listU, listVersions, listRetracted bool) []*modinfo.ModulePublic { - mods := listModules(ctx, args, listVersions, listRetracted) +type ListMode int + +const ( + ListU ListMode = 1 << iota + ListRetracted + ListDeprecated + ListVersions + ListRetractedVersions +) + +// ListModules returns a description of the modules matching args, if known, +// along with any error preventing additional matches from being identified. +// +// The returned slice can be nonempty even if the error is non-nil. +func ListModules(ctx context.Context, args []string, mode ListMode) ([]*modinfo.ModulePublic, error) { + rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode) type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) - if listU || listVersions || listRetracted { + if mode != 0 { for _, m := range mods { add := func(m *modinfo.ModulePublic) { sem <- token{} go func() { - if listU { + if mode&ListU != 0 { addUpdate(ctx, m) } - if listVersions { - addVersions(ctx, m, listRetracted) + if mode&ListVersions != 0 { + addVersions(ctx, m, mode&ListRetractedVersions != 0) } - if listRetracted || listU { + if mode&ListRetracted != 0 { addRetraction(ctx, m) } + if mode&ListDeprecated != 0 { + addDeprecation(ctx, m) + } <-sem }() } @@ -54,17 +71,18 @@ func ListModules(ctx context.Context, args []string, listU, listVersions, listRe sem <- token{} } - return mods + if err == nil { + commitRequirements(ctx, modFileGoVersion(), rs) + } + return mods, err } -func listModules(ctx context.Context, args []string, listVersions, listRetracted bool) []*modinfo.ModulePublic { - LoadAllModules(ctx) +func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) { if len(args) == 0 { - return []*modinfo.ModulePublic{moduleInfo(ctx, buildList[0], true, listRetracted)} + return rs, []*modinfo.ModulePublic{moduleInfo(ctx, rs, Target, mode)}, nil } - var mods []*modinfo.ModulePublic - matchedBuildList := make([]bool, len(buildList)) + needFullGraph := false for _, arg := range args { if strings.Contains(arg, `\`) { base.Fatalf("go: module paths never use backslash") @@ -72,22 +90,62 @@ func listModules(ctx context.Context, args []string, listVersions, listRetracted if search.IsRelativePath(arg) { base.Fatalf("go: cannot use relative path %s to specify module", arg) } - if !HasModRoot() && (arg == "all" || strings.Contains(arg, "...")) { - base.Fatalf("go: cannot match %q: working directory is not part of a module", arg) + if arg == "all" || strings.Contains(arg, "...") { + needFullGraph = true + if !HasModRoot() { + base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) + } + continue } if i := strings.Index(arg, "@"); i >= 0 { path := arg[:i] vers := arg[i+1:] + if vers == "upgrade" || vers == "patch" { + if _, ok := rs.rootSelected(path); !ok || rs.depth == eager { + needFullGraph = true + if !HasModRoot() { + base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) + } + } + } + continue + } + if _, ok := rs.rootSelected(arg); !ok || rs.depth == eager { + needFullGraph = true + if 
mode&ListVersions == 0 && !HasModRoot() { + base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, ErrNoModRoot) + } + } + } + + var mg *ModuleGraph + if needFullGraph { + rs, mg, mgErr = expandGraph(ctx, rs) + } + + matchedModule := map[module.Version]bool{} + for _, arg := range args { + if i := strings.Index(arg, "@"); i >= 0 { + path := arg[:i] + vers := arg[i+1:] + var current string - for _, m := range buildList { - if m.Path == path { - current = m.Version - break + if mg == nil { + current, _ = rs.rootSelected(path) + } else { + current = mg.Selected(path) + } + if current == "none" && mgErr != nil { + if vers == "upgrade" || vers == "patch" { + // The module graph is incomplete, so we don't know what version we're + // actually upgrading from. + // mgErr is already set, so just skip this module. + continue } } allowed := CheckAllowed - if IsRevisionQuery(vers) || listRetracted { + if IsRevisionQuery(vers) || mode&ListRetracted != 0 { // Allow excluded and retracted versions if the user asked for a // specific revision or used 'go list -retracted'. allowed = nil @@ -101,75 +159,79 @@ func listModules(ctx context.Context, args []string, listVersions, listRetracted }) continue } - mod := moduleInfo(ctx, module.Version{Path: path, Version: info.Version}, false, listRetracted) + + // Indicate that m was resolved from outside of rs by passing a nil + // *Requirements instead. + var noRS *Requirements + + mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode) mods = append(mods, mod) continue } // Module path or pattern. var match func(string) bool - var literal bool if arg == "all" { match = func(string) bool { return true } } else if strings.Contains(arg, "...") { match = search.MatchPattern(arg) } else { - match = func(p string) bool { return arg == p } - literal = true - } - matched := false - for i, m := range buildList { - if i == 0 && !HasModRoot() { - // The root module doesn't actually exist: omit it. + var v string + if mg == nil { + var ok bool + v, ok = rs.rootSelected(arg) + if !ok { + // We checked rootSelected(arg) in the earlier args loop, so if there + // is no such root we should have loaded a non-nil mg. + panic(fmt.Sprintf("internal error: root requirement expected but not found for %v", arg)) + } + } else { + v = mg.Selected(arg) + } + if v == "none" && mgErr != nil { + // mgErr is already set, so just skip this module. continue } + if v != "none" { + mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode)) + } else if cfg.BuildMod == "vendor" { + // In vendor mode, we can't determine whether a missing module is “a + // known dependency” because the module graph is incomplete. + // Give a more explicit error message. + mods = append(mods, &modinfo.ModulePublic{ + Path: arg, + Error: modinfoError(arg, "", errors.New("can't resolve module using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)")), + }) + } else if mode&ListVersions != 0 { + // Don't make the user provide an explicit '@latest' when they're + // explicitly asking what the available versions are. Instead, return a + // module with version "none", to which we can add the requested list. 
+ mods = append(mods, &modinfo.ModulePublic{Path: arg}) + } else { + mods = append(mods, &modinfo.ModulePublic{ + Path: arg, + Error: modinfoError(arg, "", errors.New("not a known dependency")), + }) + } + continue + } + + matched := false + for _, m := range mg.BuildList() { if match(m.Path) { matched = true - if !matchedBuildList[i] { - matchedBuildList[i] = true - mods = append(mods, moduleInfo(ctx, m, true, listRetracted)) + if !matchedModule[m] { + matchedModule[m] = true + mods = append(mods, moduleInfo(ctx, rs, m, mode)) } } } if !matched { - if literal { - if listVersions { - // Don't make the user provide an explicit '@latest' when they're - // explicitly asking what the available versions are. - // Instead, resolve the module, even if it isn't an existing dependency. - info, err := Query(ctx, arg, "latest", "", nil) - if err == nil { - mod := moduleInfo(ctx, module.Version{Path: arg, Version: info.Version}, false, listRetracted) - mods = append(mods, mod) - } else { - mods = append(mods, &modinfo.ModulePublic{ - Path: arg, - Error: modinfoError(arg, "", err), - }) - } - continue - } - if cfg.BuildMod == "vendor" { - // In vendor mode, we can't determine whether a missing module is “a - // known dependency” because the module graph is incomplete. - // Give a more explicit error message. - mods = append(mods, &modinfo.ModulePublic{ - Path: arg, - Error: modinfoError(arg, "", errors.New("can't resolve module using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)")), - }) - } else { - mods = append(mods, &modinfo.ModulePublic{ - Path: arg, - Error: modinfoError(arg, "", errors.New("not a known dependency")), - }) - } - } else { - fmt.Fprintf(os.Stderr, "warning: pattern %q matched no module dependencies\n", arg) - } + fmt.Fprintf(os.Stderr, "warning: pattern %q matched no module dependencies\n", arg) } } - return mods + return rs, mods, mgErr } // modinfoError wraps an error to create an error message in diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 6d87acc6d3b..f30ac6e0c8b 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -49,7 +49,7 @@ package modload // Because "go mod vendor" prunes out the tests of vendored packages, the // behavior of the "all" pattern with -mod=vendor in Go 1.11–1.15 is the same // as the "all" pattern (regardless of the -mod flag) in 1.16+. -// The allClosesOverTests parameter to the loader indicates whether the "all" +// The loader uses the GoVersion parameter to determine whether the "all" // pattern should close over tests (as in Go 1.11–1.15) or stop at only those // packages transitively imported by the packages and tests in the main module // ("all" in Go 1.16+ and "go mod vendor" in Go 1.11+). @@ -121,19 +121,42 @@ import ( "cmd/go/internal/str" "golang.org/x/mod/module" + "golang.org/x/mod/semver" ) // loaded is the most recently-used package loader. // It holds details about individual packages. +// +// This variable should only be accessed directly in top-level exported +// functions. All other functions that require or produce a *loader should pass +// or return it as an explicit parameter. var loaded *loader // PackageOpts control the behavior of the LoadPackages function. type PackageOpts struct { + // GoVersion is the Go version to which the go.mod file should be updated + // after packages have been loaded. 
+	//
+	// An empty GoVersion means to use the Go version already specified in the
+	// main module's go.mod file, or the latest Go version if there is no main
+	// module.
+	GoVersion string
+
 	// Tags are the build tags in effect (as interpreted by the
 	// cmd/go/internal/imports package).
 	// If nil, treated as equivalent to imports.Tags().
 	Tags map[string]bool
 
+	// Tidy, if true, requests that the build list and go.sum file be reduced to
+	// the minimal dependencies needed to reproducibly reload the requested
+	// packages.
+	Tidy bool
+
+	// VendorModulesInGOROOTSrc indicates that if we are within a module in
+	// GOROOT/src, packages in the module's vendor directory should be resolved as
+	// actual module dependencies (instead of standard-library packages).
+	VendorModulesInGOROOTSrc bool
+
 	// ResolveMissingImports indicates that we should attempt to add module
 	// dependencies as needed to resolve imports of packages that are not found.
 	//
@@ -166,9 +189,31 @@ type PackageOpts struct {
 	// an error occurs.
 	AllowErrors bool
 
-	// SilenceErrors indicates that LoadPackages should not print errors
-	// that occur while loading packages. SilenceErrors implies AllowErrors.
-	SilenceErrors bool
+	// SilencePackageErrors indicates that LoadPackages should not print errors
+	// that occur while matching or loading packages, and should not terminate the
+	// process if such an error occurs.
+	//
+	// Errors encountered in the module graph will still be reported.
+	//
+	// The caller may retrieve the silenced package errors using the Lookup
+	// function, and matching errors are still populated in the Errs field of the
+	// associated search.Match.
+	SilencePackageErrors bool
+
+	// SilenceMissingStdImports indicates that LoadPackages should not print
+	// errors or terminate the process if an imported package is missing, and the
+	// import path looks like it might be in the standard library (perhaps in a
+	// future version).
+	SilenceMissingStdImports bool
+
+	// SilenceNoGoErrors indicates that LoadPackages should not print
+	// imports.ErrNoGo errors.
+	// This allows the caller to invoke LoadPackages (and report other errors)
+	// without knowing whether the requested packages exist for the given tags.
+	//
+	// Note that if a requested package does not exist *at all*, it will fail
+	// during module resolution and the error will not be suppressed.
+	SilenceNoGoErrors bool
 
 	// SilenceUnmatchedWarnings suppresses the warnings normally emitted for
 	// patterns that did not match any packages.
@@ -178,7 +223,6 @@ type PackageOpts struct {
 // LoadPackages identifies the set of packages matching the given patterns and
 // loads the packages in the import graph rooted at that set.
 func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) {
-	LoadModFile(ctx)
 	if opts.Tags == nil {
 		opts.Tags = imports.Tags()
 	}
@@ -193,13 +237,13 @@
 		}
 	}
 
-	updateMatches := func(ld *loader) {
+	updateMatches := func(rs *Requirements, ld *loader) {
 		for _, m := range matches {
 			switch {
 			case m.IsLocal():
 				// Evaluate list of file system directories on first iteration.
 				if m.Dirs == nil {
-					matchLocalDirs(m)
+					matchLocalDirs(ctx, m, rs)
 				}
 
 				// Make a copy of the directory list and translate to import paths.
@@ -210,7 +254,7 @@
 			// the loader iterations.
m.Pkgs = m.Pkgs[:0] for _, dir := range m.Dirs { - pkg, err := resolveLocalPackage(dir) + pkg, err := resolveLocalPackage(ctx, dir, rs) if err != nil { if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) { continue // Don't include "builtin" or GOROOT/src in wildcard patterns. @@ -233,7 +277,17 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma case strings.Contains(m.Pattern(), "..."): m.Errs = m.Errs[:0] - matchPackages(ctx, m, opts.Tags, includeStd, buildList) + mg, err := rs.Graph(ctx) + if err != nil { + // The module graph is (or may be) incomplete — perhaps we failed to + // load the requirements of some module. This is an error in matching + // the patterns to packages, because we may be missing some packages + // or we may erroneously match packages in the wrong versions of + // modules. However, for cases like 'go list -e', the error should not + // necessarily prevent us from loading the packages we could find. + m.Errs = append(m.Errs, err) + } + matchPackages(ctx, m, opts.Tags, includeStd, mg.BuildList()) case m.Pattern() == "all": if ld == nil { @@ -258,14 +312,20 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } } - loaded = loadFromRoots(loaderParams{ - PackageOpts: opts, + initialRS, _ := loadModFile(ctx) // Ignore needCommit — we're going to commit at the end regardless. - allClosesOverTests: index.allPatternClosesOverTests() && !opts.UseVendorAll, - allPatternIsRoot: allPatternIsRoot, + if opts.GoVersion == "" { + opts.GoVersion = modFileGoVersion() + } - listRoots: func() (roots []string) { - updateMatches(nil) + ld := loadFromRoots(ctx, loaderParams{ + PackageOpts: opts, + requirements: initialRS, + + allPatternIsRoot: allPatternIsRoot, + + listRoots: func(rs *Requirements) (roots []string) { + updateMatches(rs, nil) for _, m := range matches { roots = append(roots, m.Pkgs...) } @@ -274,42 +334,14 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma }) // One last pass to finalize wildcards. - updateMatches(loaded) + updateMatches(ld.requirements, ld) - // Report errors, if any. - checkMultiplePaths() - for _, pkg := range loaded.pkgs { - if pkg.err != nil { - if sumErr := (*ImportMissingSumError)(nil); errors.As(pkg.err, &sumErr) { - if importer := pkg.stack; importer != nil { - sumErr.importer = importer.path - sumErr.importerVersion = importer.mod.Version - sumErr.importerIsTest = importer.testOf != nil - } - } - - if !opts.SilenceErrors { - if opts.AllowErrors { - fmt.Fprintf(os.Stderr, "%s: %v\n", pkg.stackText(), pkg.err) - } else { - base.Errorf("%s: %v", pkg.stackText(), pkg.err) - } - } - } - if !pkg.isTest() { - loadedPackages = append(loadedPackages, pkg.path) - } - } - if !opts.SilenceErrors { - // Also list errors in matching patterns (such as directory permission - // errors for wildcard patterns). + // List errors in matching patterns (such as directory permission + // errors for wildcard patterns). + if !ld.SilencePackageErrors { for _, match := range matches { for _, err := range match.Errs { - if opts.AllowErrors { - fmt.Fprintf(os.Stderr, "%v\n", err) - } else { - base.Errorf("%v", err) - } + ld.errorf("%v\n", err) } } } @@ -319,15 +351,49 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma search.WarnUnmatched(matches) } - // Success! Update go.mod (if needed) and return the results. 
- WriteGoMod() + if opts.Tidy { + if cfg.BuildV { + mg, _ := ld.requirements.Graph(ctx) + + for _, m := range initialRS.rootModules { + var unused bool + if ld.requirements.depth == eager { + // m is unused if it was dropped from the module graph entirely. If it + // was only demoted from direct to indirect, it may still be in use via + // a transitive import. + unused = mg.Selected(m.Path) == "none" + } else { + // m is unused if it was dropped from the roots. If it is still present + // as a transitive dependency, that transitive dependency is not needed + // by any package or test in the main module. + _, ok := ld.requirements.rootSelected(m.Path) + unused = !ok + } + if unused { + fmt.Fprintf(os.Stderr, "unused %s\n", m.Path) + } + } + } + + modfetch.TrimGoSum(keepSums(ctx, ld, ld.requirements, loadedZipSumsOnly)) + } + + // Success! Update go.mod and go.sum (if needed) and return the results. + loaded = ld + commitRequirements(ctx, opts.GoVersion, loaded.requirements) + + for _, pkg := range ld.pkgs { + if !pkg.isTest() { + loadedPackages = append(loadedPackages, pkg.path) + } + } sort.Strings(loadedPackages) return matches, loadedPackages } // matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories // outside of the standard library and active modules. -func matchLocalDirs(m *search.Match) { +func matchLocalDirs(ctx context.Context, m *search.Match, rs *Requirements) { if !m.IsLocal() { panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern())) } @@ -341,9 +407,9 @@ func matchLocalDirs(m *search.Match) { dir := filepath.Dir(filepath.Clean(m.Pattern()[:i+3])) absDir := dir if !filepath.IsAbs(dir) { - absDir = filepath.Join(base.Cwd, dir) + absDir = filepath.Join(base.Cwd(), dir) } - if search.InDir(absDir, cfg.GOROOTsrc) == "" && search.InDir(absDir, ModRoot()) == "" && pathInModuleCache(absDir) == "" { + if search.InDir(absDir, cfg.GOROOTsrc) == "" && search.InDir(absDir, ModRoot()) == "" && pathInModuleCache(ctx, absDir, rs) == "" { m.Dirs = []string{} m.AddError(fmt.Errorf("directory prefix %s outside available modules", base.ShortPath(absDir))) return @@ -354,12 +420,12 @@ func matchLocalDirs(m *search.Match) { } // resolveLocalPackage resolves a filesystem path to a package path. -func resolveLocalPackage(dir string) (string, error) { +func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (string, error) { var absDir string if filepath.IsAbs(dir) { absDir = filepath.Clean(dir) } else { - absDir = filepath.Join(base.Cwd, dir) + absDir = filepath.Join(base.Cwd(), dir) } bp, err := cfg.BuildContext.ImportDir(absDir, 0) @@ -445,7 +511,7 @@ func resolveLocalPackage(dir string) (string, error) { return pkg, nil } - pkg := pathInModuleCache(absDir) + pkg := pathInModuleCache(ctx, absDir, rs) if pkg == "" { return "", fmt.Errorf("directory %s outside available modules", base.ShortPath(absDir)) } @@ -460,7 +526,7 @@ var ( // pathInModuleCache returns the import path of the directory dir, // if dir is in the module cache copy of a module in our build list. 
-func pathInModuleCache(dir string) string { +func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string { tryMod := func(m module.Version) (string, bool) { var root string var err error @@ -490,20 +556,48 @@ func pathInModuleCache(dir string) string { return path.Join(m.Path, filepath.ToSlash(sub)), true } - for _, m := range buildList[1:] { - if importPath, ok := tryMod(m); ok { - // checkMultiplePaths ensures that a module can be used for at most one - // requirement, so this must be it. - return importPath + if rs.depth == lazy { + for _, m := range rs.rootModules { + if v, _ := rs.rootSelected(m.Path); v != m.Version { + continue // m is a root, but we have a higher root for the same path. + } + if importPath, ok := tryMod(m); ok { + // checkMultiplePaths ensures that a module can be used for at most one + // requirement, so this must be it. + return importPath + } } } - return "" + + // None of the roots contained dir, or we're in eager mode and want to load + // the full module graph more aggressively. Either way, check the full graph + // to see if the directory is a non-root dependency. + // + // If the roots are not consistent with the full module graph, the selected + // versions of root modules may differ from what we already checked above. + // Re-check those paths too. + + mg, _ := rs.Graph(ctx) + var importPath string + for _, m := range mg.BuildList() { + var found bool + importPath, found = tryMod(m) + if found { + break + } + } + return importPath } // ImportFromFiles adds modules to the build list as needed // to satisfy the imports in the named Go source files. +// +// Errors in missing dependencies are silenced. +// +// TODO(bcmills): Silencing errors seems off. Take a closer look at this and +// figure out what the error-reporting actually ought to be. func ImportFromFiles(ctx context.Context, gofiles []string) { - LoadModFile(ctx) + rs := LoadModFile(ctx) tags := imports.Tags() imports, testImports, err := imports.ScanFiles(gofiles, tags) @@ -511,31 +605,34 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { base.Fatalf("go: %v", err) } - loaded = loadFromRoots(loaderParams{ + goVersion := modFileGoVersion() + loaded = loadFromRoots(ctx, loaderParams{ PackageOpts: PackageOpts{ + GoVersion: goVersion, Tags: tags, ResolveMissingImports: true, + SilencePackageErrors: true, }, - allClosesOverTests: index.allPatternClosesOverTests(), - listRoots: func() (roots []string) { + requirements: rs, + listRoots: func(*Requirements) (roots []string) { roots = append(roots, imports...) roots = append(roots, testImports...) return roots }, }) - WriteGoMod() + commitRequirements(ctx, goVersion, loaded.requirements) } // DirImportPath returns the effective import path for dir, // provided it is within the main module, or else returns ".". -func DirImportPath(dir string) string { +func DirImportPath(ctx context.Context, dir string) string { if !HasModRoot() { return "." } - LoadModFile(context.TODO()) + LoadModFile(ctx) // Sets targetPrefix. if !filepath.IsAbs(dir) { - dir = filepath.Join(base.Cwd, dir) + dir = filepath.Join(base.Cwd(), dir) } else { dir = filepath.Clean(dir) } @@ -559,8 +656,8 @@ func DirImportPath(dir string) string { func TargetPackages(ctx context.Context, pattern string) *search.Match { // TargetPackages is relative to the main module, so ensure that the main // module is a thing that can contain packages. - LoadModFile(ctx) - ModRoot() + LoadModFile(ctx) // Sets Target. 
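The tryMod helper above recovers an import path from a directory by joining the directory's path relative to a module root onto the module path. A standalone sketch of just that computation, with invented directory and module names (the real code locates the root via the module cache and uses search.InDir rather than filepath.Rel):

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"
)

// importPathInModule reports the import path for dir within the module
// rooted at modRoot with module path modPath, if dir is inside modRoot.
func importPathInModule(dir, modRoot, modPath string) (string, bool) {
	sub, err := filepath.Rel(modRoot, dir)
	if err != nil || sub == ".." || strings.HasPrefix(sub, ".."+string(filepath.Separator)) {
		return "", false // dir is not inside modRoot
	}
	if sub == "." {
		return modPath, true
	}
	return path.Join(modPath, filepath.ToSlash(sub)), true
}

func main() {
	p, ok := importPathInModule(
		"/home/user/m/internal/foo", // dir (invented)
		"/home/user/m",              // module root (invented)
		"example.com/m",             // module path (invented)
	)
	fmt.Println(p, ok) // example.com/m/internal/foo true
}
```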
+ ModRoot() // Emits an error if Target cannot contain packages. m := search.NewMatch(pattern) matchPackages(ctx, m, imports.AnyTags(), omitStd, []module.Version{Target}) @@ -659,26 +756,29 @@ func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath str type loader struct { loaderParams + // allClosesOverTests indicates whether the "all" pattern includes + // dependencies of tests outside the main module (as in Go 1.11–1.15). + // (Otherwise — as in Go 1.16+ — the "all" pattern includes only the packages + // transitively *imported by* the packages and tests in the main module.) + allClosesOverTests bool + work *par.Queue // reset on each iteration roots []*loadPkg pkgCache *par.Cache // package path (string) → *loadPkg pkgs []*loadPkg // transitive closure of loaded packages and tests; populated in buildStacks - - // computed at end of iterations - direct map[string]bool // imported directly by main module } // loaderParams configure the packages loaded by, and the properties reported // by, a loader instance. type loaderParams struct { PackageOpts + requirements *Requirements - allClosesOverTests bool // Does the "all" pattern include the transitive closure of tests of packages in "all"? - allPatternIsRoot bool // Is the "all" pattern an additional root? + allPatternIsRoot bool // Is the "all" pattern an additional root? - listRoots func() []string + listRoots func(rs *Requirements) []string } func (ld *loader) reset() { @@ -693,6 +793,16 @@ func (ld *loader) reset() { ld.pkgs = nil } +// errorf reports an error via either os.Stderr or base.Errorf, +// according to whether ld.AllowErrors is set. +func (ld *loader) errorf(format string, args ...interface{}) { + if ld.AllowErrors { + fmt.Fprintf(os.Stderr, format, args...) + } else { + base.Errorf(format, args...) + } +} + // A loadPkg records information about a single loaded package. type loadPkg struct { // Populated at construction time: @@ -785,6 +895,18 @@ func (pkg *loadPkg) isTest() bool { return pkg.testOf != nil } +// fromExternalModule reports whether pkg was loaded from a module other than +// the main module. +func (pkg *loadPkg) fromExternalModule() bool { + if pkg.mod.Path == "" { + return false // loaded from the standard library, not a module + } + if pkg.mod.Path == Target.Path { + return false // loaded from the main module. + } + return true +} + var errMissing = errors.New("cannot find package") // loadFromRoots attempts to load the build graph needed to process a set of @@ -793,20 +915,36 @@ var errMissing = errors.New("cannot find package") // The set of root packages is returned by the params.listRoots function, and // expanded to the full set of packages by tracing imports (and possibly tests) // as needed. -func loadFromRoots(params loaderParams) *loader { +func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld := &loader{ loaderParams: params, work: par.NewQueue(runtime.GOMAXPROCS(0)), } - var err error - reqs := &mvsReqs{buildList: buildList} - buildList, err = mvs.BuildList(Target, reqs) - if err != nil { - base.Fatalf("go: %v", err) + if params.GoVersion != "" { + if semver.Compare("v"+params.GoVersion, narrowAllVersionV) < 0 && !ld.UseVendorAll { + // The module's go version explicitly predates the change in "all" for lazy + // loading, so continue to use the older interpretation. + // (If params.GoVersion is empty, we are probably not in any module at all + // and should use the latest semantics.) 
+			ld.allClosesOverTests = true
+		}
+
+		var err error
+		ld.requirements, err = convertDepth(ctx, ld.requirements, modDepthFromGoVersion(params.GoVersion))
+		if err != nil {
+			ld.errorf("go: %v\n", err)
+		}
+	}
+
+	if ld.requirements.depth == eager {
+		var err error
+		ld.requirements, _, err = expandGraph(ctx, ld.requirements)
+		if err != nil {
+			ld.errorf("go: %v\n", err)
+		}
 	}

-	addedModuleFor := make(map[string]bool)
 	for {
 		ld.reset()

@@ -814,9 +952,29 @@ func loadFromRoots(params loaderParams) *loader {
 		// Note: the returned roots can change on each iteration,
 		// since the expansion of package patterns depends on the
 		// build list we're using.
+		rootPkgs := ld.listRoots(ld.requirements)
+
+		if ld.requirements.depth == lazy && cfg.BuildMod == "mod" {
+			// Before we start loading transitive imports of packages, locate all of
+			// the root packages and promote their containing modules to root
+			// modules. If their go.mod files are tidy (the common case) and the
+			// set of root packages does not change, then we can select the correct
+			// versions of all transitive imports on the first try and complete
+			// loading in a single iteration.
+			changedBuildList := ld.preloadRootModules(ctx, rootPkgs)
+			if changedBuildList {
+				// The build list has changed, so the set of root packages may have also
+				// changed. Start over to pick up the changes. (Preloading roots is much
+				// cheaper than loading the full import graph, so we would rather pay
+				// for an extra iteration of preloading than potentially end up
+				// discarding the result of a full iteration of loading.)
+				continue
+			}
+		}
+
 		inRoots := map[*loadPkg]bool{}
-		for _, path := range ld.listRoots() {
-			root := ld.pkg(path, pkgIsRoot)
+		for _, path := range rootPkgs {
+			root := ld.pkg(ctx, path, pkgIsRoot)
 			if !inRoots[root] {
 				ld.roots = append(ld.roots, root)
 				inRoots[root] = true
@@ -832,77 +990,314 @@ func loadFromRoots(params loaderParams) *loader {

 		ld.buildStacks()

+		changed, err := ld.updateRequirements(ctx)
+		if err != nil {
+			ld.errorf("go: %v\n", err)
+			break
+		}
+		if changed {
+			// Don't resolve missing imports until the module graph has stabilized.
+			// If the roots are still changing, they may turn out to specify a
+			// requirement on the missing package(s), and we would rather use a
+			// version specified by a new root than add a new dependency on an
+			// unrelated version.
+			continue
+		}
+
 		if !ld.ResolveMissingImports || (!HasModRoot() && !allowMissingModuleImports) {
 			// We've loaded as much as we can without resolving missing imports.
 			break
 		}
-		modAddedBy := ld.resolveMissingImports(addedModuleFor)
+
+		modAddedBy := ld.resolveMissingImports(ctx)
 		if len(modAddedBy) == 0 {
+			// The roots are stable, and we've resolved all of the missing packages
+			// that we can.
 			break
 		}

-		// Recompute buildList with all our additions.
-		reqs = &mvsReqs{buildList: buildList}
-		buildList, err = mvs.BuildList(Target, reqs)
+		toAdd := make([]module.Version, 0, len(modAddedBy))
+		for m := range modAddedBy {
+			toAdd = append(toAdd, m)
+		}
+		module.Sort(toAdd) // to make errors deterministic
+
+		// We ran updateRequirements before resolving missing imports and it didn't
+		// make any changes, so we know that the requirement graph is already
+		// consistent with ld.pkgs: we don't need to pass ld.pkgs to updateRoots
+		// again. (That would waste time looking for changes that we have already
+		// applied.)
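The loop above is a fixed-point iteration: load packages under the current roots, let updateRequirements and resolveMissingImports grow the root set, and repeat until nothing changes. A toy sketch of that shape, with a fake needs function standing in for a full load pass:

```go
package main

import "fmt"

func main() {
	roots := map[string]bool{"example.com/a": true}

	// needs is a hypothetical stand-in for a load pass: it reports modules
	// that must be promoted to roots under the current root set.
	needs := func(roots map[string]bool) []string {
		if roots["example.com/a"] && !roots["example.com/b"] {
			return []string{"example.com/b"} // a's packages import b
		}
		return nil
	}

	for i := 1; ; i++ {
		add := needs(roots)
		if len(add) == 0 {
			fmt.Printf("stable after %d iteration(s): %d roots\n", i, len(roots))
			break
		}
		for _, m := range add {
			roots[m] = true // the root set only ever grows, so this terminates
		}
	}
}
```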
+ var noPkgs []*loadPkg + // We also know that we're going to call updateRequirements again next + // iteration so we don't need to also update it here. (That would waste time + // computing a "direct" map that we'll have to recompute later anyway.) + direct := ld.requirements.direct + rs, err := updateRoots(ctx, direct, ld.requirements, noPkgs, toAdd) if err != nil { // If an error was found in a newly added module, report the package // import stack instead of the module requirement stack. Packages // are more descriptive. if err, ok := err.(*mvs.BuildListError); ok { if pkg := modAddedBy[err.Module()]; pkg != nil { - base.Fatalf("go: %s: %v", pkg.stackText(), err.Err) + ld.errorf("go: %s: %v\n", pkg.stackText(), err.Err) + break } } - base.Fatalf("go: %v", err) + ld.errorf("go: %v\n", err) + break } + if reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) { + // Something is deeply wrong. resolveMissingImports gave us a non-empty + // set of modules to add to the graph, but adding those modules had no + // effect — either they were already in the graph, or updateRoots did not + // add them as requested. + panic(fmt.Sprintf("internal error: adding %v to module graph had no effect on root requirements (%v)", toAdd, rs.rootModules)) + } + ld.requirements = rs } - base.ExitIfErrors() + base.ExitIfErrors() // TODO(bcmills): Is this actually needed? - // Compute directly referenced dependency modules. - ld.direct = make(map[string]bool) + // Tidy the build list, if applicable, before we report errors. + // (The process of tidying may remove errors from irrelevant dependencies.) + if ld.Tidy { + rs, err := tidyRoots(ctx, ld.requirements, ld.pkgs) + if err != nil { + ld.errorf("go: %v\n", err) + } + + if ld.requirements.depth == lazy { + // We continuously add tidy roots to ld.requirements during loading, so at + // this point the tidy roots should be a subset of the roots of + // ld.requirements, ensuring that no new dependencies are brought inside + // the lazy-loading horizon. + // If that is not the case, there is a bug in the loading loop above. + for _, m := range rs.rootModules { + if v, ok := ld.requirements.rootSelected(m.Path); !ok || v != m.Version { + ld.errorf("go: internal error: a requirement on %v is needed but was not added during package loading\n", m) + base.ExitIfErrors() + } + } + } + ld.requirements = rs + } + + // Report errors, if any. for _, pkg := range ld.pkgs { - if pkg.mod == Target { - for _, dep := range pkg.imports { - if dep.mod.Path != "" && dep.mod.Path != Target.Path && index != nil { - _, explicit := index.require[dep.mod] - if allowWriteGoMod && cfg.BuildMod == "readonly" && !explicit { - // TODO(#40775): attach error to package instead of using - // base.Errorf. Ideally, 'go list' should not fail because of this, - // but today, LoadPackages calls WriteGoMod unconditionally, which - // would fail with a less clear message. - base.Errorf("go: %[1]s: package %[2]s imported from implicitly required module; to add missing requirements, run:\n\tgo get %[2]s@%[3]s", pkg.path, dep.path, dep.mod.Version) - } - ld.direct[dep.mod.Path] = true - } + if pkg.err == nil { + continue + } + + // Add importer information to checksum errors. 
+ if sumErr := (*ImportMissingSumError)(nil); errors.As(pkg.err, &sumErr) { + if importer := pkg.stack; importer != nil { + sumErr.importer = importer.path + sumErr.importerVersion = importer.mod.Version + sumErr.importerIsTest = importer.testOf != nil } } - } - base.ExitIfErrors() - // If we didn't scan all of the imports from the main module, or didn't use - // imports.AnyTags, then we didn't necessarily load every package that - // contributes “direct” imports — so we can't safely mark existing - // dependencies as indirect-only. - // Conservatively mark those dependencies as direct. - if modFile != nil && (!ld.allPatternIsRoot || !reflect.DeepEqual(ld.Tags, imports.AnyTags())) { - for _, r := range modFile.Require { - if !r.Indirect { - ld.direct[r.Mod.Path] = true - } + if ld.SilencePackageErrors { + continue } + if stdErr := (*ImportMissingError)(nil); errors.As(pkg.err, &stdErr) && + stdErr.isStd && ld.SilenceMissingStdImports { + continue + } + if ld.SilenceNoGoErrors && errors.Is(pkg.err, imports.ErrNoGo) { + continue + } + + ld.errorf("%s: %v\n", pkg.stackText(), pkg.err) } + ld.checkMultiplePaths() return ld } -// resolveMissingImports adds module dependencies to the global build list -// in order to resolve missing packages from pkgs. +// updateRequirements ensures that ld.requirements is consistent with the +// information gained from ld.pkgs and includes the modules in add as roots at +// at least the given versions. +// +// In particular: +// +// - Modules that provide packages directly imported from the main module are +// marked as direct, and are promoted to explicit roots. If a needed root +// cannot be promoted due to -mod=readonly or -mod=vendor, the importing +// package is marked with an error. +// +// - If ld scanned the "all" pattern independent of build constraints, it is +// guaranteed to have seen every direct import. Module dependencies that did +// not provide any directly-imported package are then marked as indirect. +// +// - Root dependencies are updated to their selected versions. +// +// The "changed" return value reports whether the update changed the selected +// version of any module that either provided a loaded package or may now +// provide a package that was previously unresolved. +func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err error) { + rs := ld.requirements + + // direct contains the set of modules believed to provide packages directly + // imported by the main module. + var direct map[string]bool + + // If we didn't scan all of the imports from the main module, or didn't use + // imports.AnyTags, then we didn't necessarily load every package that + // contributes “direct” imports — so we can't safely mark existing direct + // dependencies in ld.requirements as indirect-only. Propagate them as direct. + loadedDirect := ld.allPatternIsRoot && reflect.DeepEqual(ld.Tags, imports.AnyTags()) + if loadedDirect { + direct = make(map[string]bool) + } else { + // TODO(bcmills): It seems like a shame to allocate and copy a map here when + // it will only rarely actually vary from rs.direct. Measure this cost and + // maybe avoid the copy. 
+ direct = make(map[string]bool, len(rs.direct)) + for mPath := range rs.direct { + direct[mPath] = true + } + } + + for _, pkg := range ld.pkgs { + if pkg.mod != Target { + continue + } + for _, dep := range pkg.imports { + if !dep.fromExternalModule() { + continue + } + + if pkg.err == nil && cfg.BuildMod != "mod" { + if v, ok := rs.rootSelected(dep.mod.Path); !ok || v != dep.mod.Version { + // dep.mod is not an explicit dependency, but needs to be. + // Because we are not in "mod" mode, we will not be able to update it. + // Instead, mark the importing package with an error. + // + // TODO(#41688): The resulting error message fails to include the file + // position of the import statement (because that information is not + // tracked by the module loader). Figure out how to plumb the import + // position through. + pkg.err = &DirectImportFromImplicitDependencyError{ + ImporterPath: pkg.path, + ImportedPath: dep.path, + Module: dep.mod, + } + // cfg.BuildMod does not allow us to change dep.mod to be a direct + // dependency, so don't mark it as such. + continue + } + } + + // dep is a package directly imported by a package or test in the main + // module and loaded from some other module (not the standard library). + // Mark its module as a direct dependency. + direct[dep.mod.Path] = true + } + } + + var addRoots []module.Version + if ld.Tidy { + // When we are tidying a lazy module, we may need to add roots to preserve + // the versions of indirect, test-only dependencies that are upgraded + // above or otherwise missing from the go.mod files of direct + // dependencies. (For example, the direct dependency might be a very + // stable codebase that predates modules and thus lacks a go.mod file, or + // the author of the direct dependency may have forgotten to commit a + // change to the go.mod file, or may have made an erroneous hand-edit that + // causes it to be untidy.) + // + // Promoting an indirect dependency to a root adds the next layer of its + // dependencies to the module graph, which may increase the selected + // versions of other modules from which we have already loaded packages. + // So after we promote an indirect dependency to a root, we need to reload + // packages, which means another iteration of loading. + // + // As an extra wrinkle, the upgrades due to promoting a root can cause + // previously-resolved packages to become unresolved. For example, the + // module providing an unstable package might be upgraded to a version + // that no longer contains that package. If we then resolve the missing + // package, we might add yet another root that upgrades away some other + // dependency. (The tests in mod_tidy_convergence*.txt illustrate some + // particularly worrisome cases.) + // + // To ensure that this process of promoting, adding, and upgrading roots + // eventually terminates, during iteration we only ever add modules to the + // root set — we only remove irrelevant roots at the very end of + // iteration, after we have already added every root that we plan to need + // in the (eventual) tidy root set. + // + // Since we do not remove any roots during iteration, even if they no + // longer provide any imported packages, the selected versions of the + // roots can only increase and the set of roots can only expand. The set + // of extant root paths is finite and the set of versions of each path is + // finite, so the iteration *must* reach a stable fixed-point. 
+		tidy, err := tidyRoots(ctx, rs, ld.pkgs)
+		if err != nil {
+			return false, err
+		}
+		addRoots = tidy.rootModules
+	}
+
+	rs, err = updateRoots(ctx, direct, rs, ld.pkgs, addRoots)
+	if err != nil {
+		// We don't actually know what even the root requirements are supposed to
+		// be, so we can't proceed with loading. Return the error to the caller.
+		return false, err
+	}
+
+	if rs != ld.requirements && !reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) {
+		// The roots of the module graph have changed in some way (not just the
+		// "direct" markings). Check whether the changes affected any of the loaded
+		// packages.
+		mg, err := rs.Graph(ctx)
+		if err != nil {
+			return false, err
+		}
+		for _, pkg := range ld.pkgs {
+			if pkg.fromExternalModule() && mg.Selected(pkg.mod.Path) != pkg.mod.Version {
+				changed = true
+				break
+			}
+			if pkg.err != nil {
+				// Promoting a module to a root may resolve an import that was
+				// previously missing (by pulling in a previously-pruned dependency that
+				// provides it) or ambiguous (by promoting exactly one of the
+				// alternatives to a root and ignoring the second-level alternatives) or
+				// otherwise errored out (by upgrading from a version that cannot be
+				// fetched to one that can be).
+				//
+				// Instead of enumerating all of the possible errors, we'll just check
+				// whether importFromModules returns nil for the package.
+				// False-positives are ok: if we have a false-positive here, we'll do an
+				// extra iteration of package loading this time, but we'll still
+				// converge when the root set stops changing.
+				//
+				// In some sense, we can think of this as ‘upgraded the module providing
+				// pkg.path from "none" to a version higher than "none"’.
+				if _, _, err = importFromModules(ctx, pkg.path, rs, nil); err == nil {
+					changed = true
+					break
+				}
+			}
+		}
+	}
+
+	ld.requirements = rs
+	return changed, nil
+}
+
+// resolveMissingImports returns a set of modules that could be added as
+// dependencies in order to resolve missing packages from pkgs.
 //
-// The newly-resolved packages are added to the addedModuleFor map, and
-// resolveMissingImports returns a map from each newly-added module version to
-// the first package for which that module was added.
-func (ld *loader) resolveMissingImports(addedModuleFor map[string]bool) (modAddedBy map[module.Version]*loadPkg) {
-	var needPkgs []*loadPkg
+// resolveMissingImports returns a map from each new module version to
+// the first missing package that module would resolve.
+func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[module.Version]*loadPkg) {
+	type pkgMod struct {
+		pkg *loadPkg
+		mod *module.Version
+	}
+	var pkgMods []pkgMod
 	for _, pkg := range ld.pkgs {
 		if pkg.err == nil {
 			continue
 		}
@@ -917,30 +1312,47 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod
 			continue
 		}

-		needPkgs = append(needPkgs, pkg)
-
 		pkg := pkg
+		var mod module.Version
 		ld.work.Add(func() {
-			pkg.mod, pkg.err = queryImport(context.TODO(), pkg.path)
+			var err error
+			mod, err = queryImport(ctx, pkg.path, ld.requirements)
+			if err != nil {
+				// pkg.err was already non-nil, so we can reasonably attribute the error
+				// for pkg to either the original error or the one returned by
+				// queryImport. The existing error indicates only that we couldn't find
+				// the package, whereas the query error also explains why we didn't fix
+				// the problem — so we prefer the latter.
+ pkg.err = err + } + + // err is nil, but we intentionally leave pkg.err non-nil and pkg.mod + // unset: we still haven't satisfied other invariants of a + // successfully-loaded package, such as scanning and loading the imports + // of that package. If we succeed in resolving the new dependency graph, + // the caller can reload pkg and update the error at that point. + // + // Even then, the package might not be loaded from the version we've + // identified here. The module may be upgraded by some other dependency, + // or by a transitive dependency of mod itself, or — less likely — the + // package may be rejected by an AllowPackage hook or rendered ambiguous + // by some other newly-added or newly-upgraded dependency. }) + + pkgMods = append(pkgMods, pkgMod{pkg: pkg, mod: &mod}) } <-ld.work.Idle() modAddedBy = map[module.Version]*loadPkg{} - for _, pkg := range needPkgs { - if pkg.err != nil { + for _, pm := range pkgMods { + pkg, mod := pm.pkg, *pm.mod + if mod.Path == "" { continue } - fmt.Fprintf(os.Stderr, "go: found %s in %s %s\n", pkg.path, pkg.mod.Path, pkg.mod.Version) - if addedModuleFor[pkg.path] { - // TODO(bcmills): This should only be an error if pkg.mod is the same - // version we already tried to add previously. - base.Fatalf("go: %s: looping trying to add package", pkg.stackText()) - } - if modAddedBy[pkg.mod] == nil { - modAddedBy[pkg.mod] = pkg - buildList = append(buildList, pkg.mod) + fmt.Fprintf(os.Stderr, "go: found %s in %s %s\n", pkg.path, mod.Path, mod.Version) + if modAddedBy[mod] == nil { + modAddedBy[mod] = pkg } } @@ -954,7 +1366,7 @@ func (ld *loader) resolveMissingImports(addedModuleFor map[string]bool) (modAdde // ld.work queue, and its test (if requested) will also be populated once // imports have been resolved. When ld.work goes idle, all transitive imports of // the requested package (and its test, if requested) will have been loaded. -func (ld *loader) pkg(path string, flags loadPkgFlags) *loadPkg { +func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loadPkg { if flags.has(pkgImportsLoaded) { panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set") } @@ -963,20 +1375,20 @@ func (ld *loader) pkg(path string, flags loadPkgFlags) *loadPkg { pkg := &loadPkg{ path: path, } - ld.applyPkgFlags(pkg, flags) + ld.applyPkgFlags(ctx, pkg, flags) - ld.work.Add(func() { ld.load(pkg) }) + ld.work.Add(func() { ld.load(ctx, pkg) }) return pkg }).(*loadPkg) - ld.applyPkgFlags(pkg, flags) + ld.applyPkgFlags(ctx, pkg, flags) return pkg } // applyPkgFlags updates pkg.flags to set the given flags and propagate the // (transitive) effects of those flags, possibly loading or enqueueing further // packages as a result. -func (ld *loader) applyPkgFlags(pkg *loadPkg, flags loadPkgFlags) { +func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkgFlags) { if flags == 0 { return } @@ -1028,7 +1440,7 @@ func (ld *loader) applyPkgFlags(pkg *loadPkg, flags loadPkgFlags) { // of packages in "all" if "all" closes over test dependencies. testFlags |= pkgInAll } - ld.pkgTest(pkg, testFlags) + ld.pkgTest(ctx, pkg, testFlags) } } @@ -1036,13 +1448,94 @@ func (ld *loader) applyPkgFlags(pkg *loadPkg, flags loadPkgFlags) { // We have just marked pkg with pkgInAll, or we have just loaded its // imports, or both. Now is the time to propagate pkgInAll to the imports. 
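Note the shape of the concurrency in resolveMissingImports above: each queued lookup writes into a pkgMod slot allocated before the work item is queued, and the slots are read only after the queue goes idle, so no lock is needed around the results. A stripped-down sketch of the same pattern, using a sync.WaitGroup in place of the internal par.Queue and a canned lookup in place of queryImport:

```go
package main

import (
	"fmt"
	"sync"
)

type result struct {
	pkg string
	mod string // filled in by the worker; empty means "not found"
}

func main() {
	pkgs := []string{"example.com/x", "example.com/y"}

	var wg sync.WaitGroup
	results := make([]result, len(pkgs)) // one slot per lookup, allocated up front
	for i, p := range pkgs {
		i, p := i, p // capture loop variables (pre-Go 1.22 idiom)
		results[i].pkg = p
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Hypothetical stand-in for queryImport: pretend only x resolves.
			if p == "example.com/x" {
				results[i].mod = "example.com/x@v1.0.0"
			}
		}()
	}
	wg.Wait() // analogous to <-ld.work.Idle()

	for _, r := range results {
		if r.mod == "" {
			continue // leave unresolved, as the loader does
		}
		fmt.Printf("found %s in %s\n", r.pkg, r.mod)
	}
}
```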
 		for _, dep := range pkg.imports {
-			ld.applyPkgFlags(dep, pkgInAll)
+			ld.applyPkgFlags(ctx, dep, pkgInAll)
 		}
 	}
 }

+// preloadRootModules loads the module requirements needed to identify the
+// selected version of each module providing a package in rootPkgs,
+// adding new root modules to the module graph if needed.
+func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (changedBuildList bool) {
+	needc := make(chan map[module.Version]bool, 1)
+	needc <- map[module.Version]bool{}
+	for _, path := range rootPkgs {
+		path := path
+		ld.work.Add(func() {
+			// First, try to identify the module containing the package using only roots.
+			//
+			// If the main module is tidy and the package is in "all" — or if we're
+			// lucky — we can identify all of its imports without actually loading the
+			// full module graph.
+			m, _, err := importFromModules(ctx, path, ld.requirements, nil)
+			if err != nil {
+				var missing *ImportMissingError
+				if errors.As(err, &missing) && ld.ResolveMissingImports {
+					// This package isn't provided by any selected module.
+					// If we can find it, it will be a new root dependency.
+					m, err = queryImport(ctx, path, ld.requirements)
+				}
+				if err != nil {
+					// We couldn't identify the root module containing this package.
+					// Leave it unresolved; we will report it during loading.
+					return
+				}
+			}
+			if m.Path == "" {
+				// The package is in std or cmd. We don't need to change the root set.
+				return
+			}
+
+			v, ok := ld.requirements.rootSelected(m.Path)
+			if !ok || v != m.Version {
+				// We found the requested package in m, but m is not a root, so
+				// loadModGraph will not load its requirements. We need to promote the
+				// module to a root to ensure that any other packages this package
+				// imports are resolved from correct dependency versions.
+				//
+				// (This is the “argument invariant” from the lazy loading design.)
+				need := <-needc
+				need[m] = true
+				needc <- need
+			}
+		})
+	}
+	<-ld.work.Idle()
+
+	need := <-needc
+	if len(need) == 0 {
+		return false // No roots to add.
+	}
+
+	toAdd := make([]module.Version, 0, len(need))
+	for m := range need {
+		toAdd = append(toAdd, m)
+	}
+	module.Sort(toAdd)
+
+	rs, err := updateRoots(ctx, ld.requirements.direct, ld.requirements, nil, toAdd)
+	if err != nil {
+		// We are missing some root dependency, and for some reason we can't load
+		// enough of the module dependency graph to add the missing root. Package
+		// loading is doomed to fail, so fail quickly.
+		ld.errorf("go: %v\n", err)
+		base.ExitIfErrors()
+		return false
+	}
+	if reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) {
+		// Something is deeply wrong. The preload pass identified a non-empty
+		// set of modules to add to the graph, but adding those modules had no
+		// effect — either they were already in the graph, or updateRoots did not
+		// add them as requested.
+		panic(fmt.Sprintf("internal error: adding %v to module graph had no effect on root requirements (%v)", toAdd, rs.rootModules))
+	}
+
+	ld.requirements = rs
+	return true
+}
+
 // load loads an individual package.
-func (ld *loader) load(pkg *loadPkg) {
+func (ld *loader) load(ctx context.Context, pkg *loadPkg) {
 	if strings.Contains(pkg.path, "@") {
 		// Leave for error during load.
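The needc channel in preloadRootModules above is a one-slot buffered channel used as a mutex: a worker receives the map (acquire), mutates it, and sends it back (release). A minimal sketch of that idiom with made-up module paths:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	needc := make(chan map[string]bool, 1)
	needc <- map[string]bool{} // the single token that owns the map

	var wg sync.WaitGroup
	for _, m := range []string{"example.com/a", "example.com/b", "example.com/a"} {
		m := m
		wg.Add(1)
		go func() {
			defer wg.Done()
			need := <-needc // acquire
			need[m] = true
			needc <- need // release
		}()
	}
	wg.Wait()

	need := <-needc
	fmt.Println(len(need), "modules to promote") // 2 modules to promote
}
```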
 		return
@@ -1061,7 +1554,24 @@ func (ld *loader) load(pkg *loadPkg) {
 		return
 	}

-	pkg.mod, pkg.dir, pkg.err = importFromBuildList(context.TODO(), pkg.path, buildList)
+	var mg *ModuleGraph
+	if ld.requirements.depth == eager {
+		var err error
+		mg, err = ld.requirements.Graph(ctx)
+		if err != nil {
+			// We already checked the error from Graph in loadFromRoots and/or
+			// updateRequirements, so we ignored the error on purpose and we should
+			// keep trying to push past it.
+			//
+			// However, because mg may be incomplete (and thus may select inaccurate
+			// versions), we shouldn't use it to load packages. Instead, we pass a nil
+			// *ModuleGraph, which will cause importFromModules to first try loading
+			// from only the main module and root dependencies.
+			mg = nil
+		}
+	}
+
+	pkg.mod, pkg.dir, pkg.err = importFromModules(ctx, pkg.path, ld.requirements, mg)
 	if pkg.dir == "" {
 		return
 	}
@@ -1075,10 +1585,10 @@ func (ld *loader) load(pkg *loadPkg) {
 		// about (by reducing churn on the flag bits of dependencies), and costs
 		// essentially nothing (these atomic flag ops are essentially free compared
 		// to scanning source code for imports).
-		ld.applyPkgFlags(pkg, pkgInAll)
+		ld.applyPkgFlags(ctx, pkg, pkgInAll)
 	}
 	if ld.AllowPackage != nil {
-		if err := ld.AllowPackage(context.TODO(), pkg.path, pkg.mod); err != nil {
+		if err := ld.AllowPackage(ctx, pkg.path, pkg.mod); err != nil {
 			pkg.err = err
 		}
 	}
@@ -1109,11 +1619,11 @@ func (ld *loader) load(pkg *loadPkg) {
 			// GOROOT/src/vendor even when "std" is not the main module.
 			path = ld.stdVendor(pkg.path, path)
 		}
-		pkg.imports = append(pkg.imports, ld.pkg(path, importFlags))
+		pkg.imports = append(pkg.imports, ld.pkg(ctx, path, importFlags))
 	}
 	pkg.testImports = testImports

-	ld.applyPkgFlags(pkg, pkgImportsLoaded)
+	ld.applyPkgFlags(ctx, pkg, pkgImportsLoaded)
 }

 // pkgTest locates the test of pkg, creating it if needed, and updates its state
@@ -1121,7 +1631,7 @@ func (ld *loader) load(pkg *loadPkg) {
 //
 // pkgTest requires that the imports of pkg have already been loaded (flagged
 // with pkgImportsLoaded).
-func (ld *loader) pkgTest(pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { +func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { if pkg.isTest() { panic("pkgTest called on a test package") } @@ -1136,7 +1646,7 @@ func (ld *loader) pkgTest(pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { err: pkg.err, inStd: pkg.inStd, } - ld.applyPkgFlags(pkg.test, testFlags) + ld.applyPkgFlags(ctx, pkg.test, testFlags) createdTest = true }) @@ -1151,12 +1661,12 @@ func (ld *loader) pkgTest(pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { if pkg.inStd { path = ld.stdVendor(test.path, path) } - test.imports = append(test.imports, ld.pkg(path, importFlags)) + test.imports = append(test.imports, ld.pkg(ctx, path, importFlags)) } pkg.testImports = nil - ld.applyPkgFlags(test, pkgImportsLoaded) + ld.applyPkgFlags(ctx, test, pkgImportsLoaded) } else { - ld.applyPkgFlags(test, testFlags) + ld.applyPkgFlags(ctx, test, testFlags) } return test @@ -1170,13 +1680,13 @@ func (ld *loader) stdVendor(parentPath, path string) string { } if str.HasPathPrefix(parentPath, "cmd") { - if Target.Path != "cmd" { + if !ld.VendorModulesInGOROOTSrc || Target.Path != "cmd" { vendorPath := pathpkg.Join("cmd", "vendor", path) if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil { return vendorPath } } - } else if Target.Path != "std" || str.HasPathPrefix(parentPath, "vendor") { + } else if !ld.VendorModulesInGOROOTSrc || Target.Path != "std" || str.HasPathPrefix(parentPath, "vendor") { // If we are outside of the 'std' module, resolve imports from within 'std' // to the vendor directory. // @@ -1211,6 +1721,29 @@ func (ld *loader) computePatternAll() (all []string) { return all } +// checkMultiplePaths verifies that a given module path is used as itself +// or as a replacement for another module, but not both at the same time. +// +// (See https://golang.org/issue/26607 and https://golang.org/issue/34650.) +func (ld *loader) checkMultiplePaths() { + mods := ld.requirements.rootModules + if cached := ld.requirements.graph.Load(); cached != nil { + if mg := cached.(cachedGraph).mg; mg != nil { + mods = mg.BuildList() + } + } + + firstPath := map[module.Version]string{} + for _, mod := range mods { + src := resolveReplacement(mod) + if prev, ok := firstPath[src]; !ok { + firstPath[src] = mod.Path + } else if prev != mod.Path { + ld.errorf("go: %s@%s used for two different module paths (%s and %s)\n", src.Path, src.Version, prev, mod.Path) + } + } +} + // scanDir is like imports.ScanDir but elides known magic imports from the list, // so that we do not go looking for packages that don't really exist. // diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index c6667d0bf79..bafff3e080e 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -25,14 +25,53 @@ import ( "golang.org/x/mod/semver" ) -// narrowAllVersionV is the Go version (plus leading "v") at which the -// module-module "all" pattern no longer closes over the dependencies of -// tests outside of the main module. -const narrowAllVersionV = "v1.16" -const go116EnableNarrowAll = true +const ( + // narrowAllVersionV is the Go version (plus leading "v") at which the + // module-module "all" pattern no longer closes over the dependencies of + // tests outside of the main module. 
+ narrowAllVersionV = "v1.16" + + // lazyLoadingVersionV is the Go version (plus leading "v") at which a + // module's go.mod file is expected to list explicit requirements on every + // module that provides any package transitively imported by that module. + lazyLoadingVersionV = "v1.17" +) + +const ( + // go117EnableLazyLoading toggles whether lazy-loading code paths should be + // active. It will be removed once the lazy loading implementation is stable + // and well-tested. + go117EnableLazyLoading = true + + // go1117LazyTODO is a constant that exists only until lazy loading is + // implemented. Its use indicates a condition that will need to change if the + // main module is lazy. + go117LazyTODO = false +) var modFile *modfile.File +// modFileGoVersion returns the (non-empty) Go version at which the requirements +// in modFile are intepreted, or the latest Go version if modFile is nil. +func modFileGoVersion() string { + if modFile == nil { + return latestGoVersion() + } + if modFile.Go == nil || modFile.Go.Version == "" { + // The main module necessarily has a go.mod file, and that file lacks a + // 'go' directive. The 'go' command has been adding that directive + // automatically since Go 1.12, so this module either dates to Go 1.11 or + // has been erroneously hand-edited. + // + // The semantics of the go.mod file are more-or-less the same from Go 1.11 + // through Go 1.16, changing at 1.17 for lazy loading. So even though a + // go.mod file without a 'go' directive is theoretically a Go 1.11 file, + // scripts may assume that it ends up as a Go 1.16 module. + return "1.16" + } + return modFile.Go.Version +} + // A modFileIndex is an index of data corresponding to a modFile // at a specific point in time. type modFileIndex struct { @@ -53,6 +92,24 @@ type requireMeta struct { indirect bool } +// A modDepth indicates which dependencies should be loaded for a go.mod file. +type modDepth uint8 + +const ( + lazy modDepth = iota // load dependencies only as needed + eager // load all transitive dependencies eagerly +) + +func modDepthFromGoVersion(goVersion string) modDepth { + if !go117EnableLazyLoading { + return eager + } + if semver.Compare("v"+goVersion, lazyLoadingVersionV) < 0 { + return eager + } + return lazy +} + // CheckAllowed returns an error equivalent to ErrDisallowed if m is excluded by // the main module's go.mod or retracted by its author. Most version queries use // this to filter out versions that should not be used. @@ -88,76 +145,53 @@ func (e *excludedError) Is(err error) bool { return err == ErrDisallowed } // CheckRetractions returns an error if module m has been retracted by // its author. -func CheckRetractions(ctx context.Context, m module.Version) error { +func CheckRetractions(ctx context.Context, m module.Version) (err error) { + defer func() { + if retractErr := (*ModuleRetractedError)(nil); err == nil || errors.As(err, &retractErr) { + return + } + // Attribute the error to the version being checked, not the version from + // which the retractions were to be loaded. + if mErr := (*module.ModuleError)(nil); errors.As(err, &mErr) { + err = mErr.Err + } + err = &retractionLoadingError{m: m, err: err} + }() + if m.Version == "" { // Main module, standard library, or file replacement module. // Cannot be retracted. return nil } - - // Look up retraction information from the latest available version of - // the module. Cache retraction information so we don't parse the go.mod - // file repeatedly. 
-	type entry struct {
-		retract []retraction
-		err     error
+	if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" {
+		// All versions of the module were replaced.
+		// Don't load retractions, since we'd just load the replacement.
+		return nil
 	}
-	path := m.Path
-	e := retractCache.Do(path, func() (v interface{}) {
-		ctx, span := trace.StartSpan(ctx, "checkRetractions "+path)
-		defer span.Done()
-
-		if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" {
-			// All versions of the module were replaced with a local directory.
-			// Don't load retractions.
-			return &entry{nil, nil}
-		}
-
-		// Find the latest version of the module.
-		// Ignore exclusions from the main module's go.mod.
-		const ignoreSelected = ""
-		var allowAll AllowedFunc
-		rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll)
-		if err != nil {
-			return &entry{nil, err}
-		}
-
-		// Load go.mod for that version.
-		// If the version is replaced, we'll load retractions from the replacement.
-		//
-		// If there's an error loading the go.mod, we'll return it here.
-		// These errors should generally be ignored by callers of checkRetractions,
-		// since they happen frequently when we're offline. These errors are not
-		// equivalent to ErrDisallowed, so they may be distinguished from
-		// retraction errors.
-		//
-		// We load the raw file here: the go.mod file may have a different module
-		// path that we expect if the module or its repository was renamed.
-		// We still want to apply retractions to other aliases of the module.
-		rm := module.Version{Path: path, Version: rev.Version}
-		if repl := Replacement(rm); repl.Path != "" {
-			rm = repl
-		}
-		summary, err := rawGoModSummary(rm)
-		if err != nil {
-			return &entry{nil, err}
-		}
-		return &entry{summary.retract, nil}
-	}).(*entry)
-
-	if err := e.err; err != nil {
-		// Attribute the error to the version being checked, not the version from
-		// which the retractions were to be loaded.
-		var mErr *module.ModuleError
-		if errors.As(err, &mErr) {
-			err = mErr.Err
-		}
-		return &retractionLoadingError{m: m, err: err}
+	// Find the latest available version of the module, and load its go.mod. If
+	// the latest version is replaced, we'll load the replacement.
+	//
+	// If there's an error loading the go.mod, we'll return it here. These errors
+	// should generally be ignored by callers since they happen frequently when
+	// we're offline. These errors are not equivalent to ErrDisallowed, so they
+	// may be distinguished from retraction errors.
+	//
+	// We load the raw file here: the go.mod file may have a different module
+	// path than we expect if the module or its repository was renamed.
+	// We still want to apply retractions to other aliases of the module.
+	rm, err := queryLatestVersionIgnoringRetractions(ctx, m.Path)
+	if err != nil {
+		return err
+	}
+	summary, err := rawGoModSummary(rm)
+	if err != nil {
+		return err
 	}

 	var rationale []string
 	isRetracted := false
-	for _, r := range e.retract {
+	for _, r := range summary.retract {
 		if semver.Compare(r.Low, m.Version) <= 0 && semver.Compare(m.Version, r.High) <= 0 {
 			isRetracted = true
 			if r.Rationale != "" {
@@ -171,8 +205,6 @@ func CheckRetractions(ctx context.Context, m module.Version) error {
 	return nil
 }

-var retractCache par.Cache
-
 type ModuleRetractedError struct {
 	Rationale []string
 }
@@ -182,7 +214,7 @@ func (e *ModuleRetractedError) Error() string {
 	if len(e.Rationale) > 0 {
 		// This is meant to be a short error printed on a terminal, so just
 		// print the first rationale.
- msg += ": " + ShortRetractionRationale(e.Rationale[0]) + msg += ": " + ShortMessage(e.Rationale[0], "retracted by module author") } return msg } @@ -204,28 +236,67 @@ func (e *retractionLoadingError) Unwrap() error { return e.err } -// ShortRetractionRationale returns a retraction rationale string that is safe -// to print in a terminal. It returns hard-coded strings if the rationale -// is empty, too long, or contains non-printable characters. -func ShortRetractionRationale(rationale string) string { - const maxRationaleBytes = 500 - if i := strings.Index(rationale, "\n"); i >= 0 { - rationale = rationale[:i] +// ShortMessage returns a string from go.mod (for example, a retraction +// rationale or deprecation message) that is safe to print in a terminal. +// +// If the given string is empty, ShortMessage returns the given default. If the +// given string is too long or contains non-printable characters, ShortMessage +// returns a hard-coded string. +func ShortMessage(message, emptyDefault string) string { + const maxLen = 500 + if i := strings.Index(message, "\n"); i >= 0 { + message = message[:i] } - rationale = strings.TrimSpace(rationale) - if rationale == "" { - return "retracted by module author" + message = strings.TrimSpace(message) + if message == "" { + return emptyDefault } - if len(rationale) > maxRationaleBytes { - return "(rationale omitted: too long)" + if len(message) > maxLen { + return "(message omitted: too long)" } - for _, r := range rationale { + for _, r := range message { if !unicode.IsGraphic(r) && !unicode.IsSpace(r) { - return "(rationale omitted: contains non-printable characters)" + return "(message omitted: contains non-printable characters)" } } // NOTE: the go.mod parser rejects invalid UTF-8, so we don't check that here. - return rationale + return message +} + +// CheckDeprecation returns a deprecation message from the go.mod file of the +// latest version of the given module. Deprecation messages are comments +// before or on the same line as the module directives that start with +// "Deprecated:" and run until the end of the paragraph. +// +// CheckDeprecation returns an error if the message can't be loaded. +// CheckDeprecation returns "", nil if there is no deprecation message. +func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("loading deprecation for %s: %w", m.Path, err) + } + }() + + if m.Version == "" { + // Main module, standard library, or file replacement module. + // Don't look up deprecation. + return "", nil + } + if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" { + // All versions of the module were replaced. + // We'll look up deprecation separately for the replacement. + return "", nil + } + + latest, err := queryLatestVersionIgnoringRetractions(ctx, m.Path) + if err != nil { + return "", err + } + summary, err := rawGoModSummary(latest) + if err != nil { + return "", err + } + return summary.deprecated, nil } // Replacement returns the replacement for mod, if any, from go.mod. @@ -243,6 +314,15 @@ func Replacement(mod module.Version) module.Version { return module.Version{} } +// resolveReplacement returns the module actually used to load the source code +// for m: either m itself, or the replacement for m (iff m is replaced). +func resolveReplacement(m module.Version) module.Version { + if r := Replacement(m); r.Path != "" { + return r + } + return m +} + // indexModFile rebuilds the index of modFile. 
// If modFile has been changed since it was first read, // modFile.Cleanup must be called before indexModFile. @@ -257,10 +337,13 @@ func indexModFile(data []byte, modFile *modfile.File, needsFix bool) *modFileInd } i.goVersionV = "" - if modFile.Go != nil { + if modFile.Go == nil { + rawGoVersion.Store(Target, "") + } else { // We're going to use the semver package to compare Go versions, so go ahead // and add the "v" prefix it expects once instead of every time. i.goVersionV = "v" + modFile.Go.Version + rawGoVersion.Store(Target, modFile.Go.Version) } i.require = make(map[module.Version]requireMeta, len(modFile.Require)) @@ -292,23 +375,6 @@ func indexModFile(data []byte, modFile *modfile.File, needsFix bool) *modFileInd return i } -// allPatternClosesOverTests reports whether the "all" pattern includes -// dependencies of tests outside the main module (as in Go 1.11–1.15). -// (Otherwise — as in Go 1.16+ — the "all" pattern includes only the packages -// transitively *imported by* the packages and tests in the main module.) -func (i *modFileIndex) allPatternClosesOverTests() bool { - if !go116EnableNarrowAll { - return true - } - if i != nil && semver.Compare(i.goVersionV, narrowAllVersionV) < 0 { - // The module explicitly predates the change in "all" for lazy loading, so - // continue to use the older interpretation. (If i == nil, we not in any - // module at all and should use the latest semantics.) - return true - } - return false -} - // modFileIsDirty reports whether the go.mod file differs meaningfully // from what was indexed. // If modFile has been changed (even cosmetically) since it was first read, @@ -335,7 +401,7 @@ func (i *modFileIndex) modFileIsDirty(modFile *modfile.File) bool { return true } } else if "v"+modFile.Go.Version != i.goVersionV { - if i.goVersionV == "" && cfg.BuildMod == "readonly" { + if i.goVersionV == "" && cfg.BuildMod != "mod" { // go.mod files did not always require a 'go' version, so do not error out // if one is missing — we may be inside an older module in the module // cache, and should bias toward providing useful behavior. @@ -391,9 +457,11 @@ var rawGoVersion sync.Map // map[module.Version]string // module. type modFileSummary struct { module module.Version - goVersionV string // GoVersion with "v" prefix + goVersion string + depth modDepth require []module.Version retract []retraction + deprecated string } // A retraction consists of a retracted version interval and rationale. @@ -433,19 +501,13 @@ func goModSummary(m module.Version) (*modFileSummary, error) { // return the full list of modules from modules.txt. readVendorList() - // TODO(#36876): Load the "go" version from vendor/modules.txt and store it - // in rawGoVersion with the appropriate key. - // We don't know what versions the vendored module actually relies on, // so assume that it requires everything. 
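A modFileSummary boils a go.mod file down to the handful of fields the loader needs, and rawGoModSummary (below) produces one by parsing the file. As a rough standalone sketch, the public golang.org/x/mod/modfile parser exposes the same inputs: module path, deprecation comment, 'go' version, and requirements. The go.mod contents here are invented:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	// The deprecation comment must be attached to the module directive,
	// as described for CheckDeprecation above.
	data := []byte(`// Deprecated: use example.com/m/v2 instead.
module example.com/m

go 1.17

require golang.org/x/mod v0.4.2
`)
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("module:    ", f.Module.Mod.Path)
	fmt.Println("deprecated:", f.Module.Deprecated)
	fmt.Println("go:        ", f.Go.Version)
	for _, r := range f.Require {
		fmt.Println("require:   ", r.Mod.Path, r.Mod.Version, "indirect:", r.Indirect)
	}
}
```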
summary.require = vendorList return summary, nil } - actual := Replacement(m) - if actual.Path == "" { - actual = m - } + actual := resolveReplacement(m) if HasModRoot() && cfg.BuildMod == "readonly" && actual.Version != "" { key := module.Version{Path: actual.Path, Version: actual.Version + "/go.mod"} if !modfetch.HaveSum(key) { @@ -561,10 +623,14 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { if f.Module != nil { summary.module = f.Module.Mod + summary.deprecated = f.Module.Deprecated } if f.Go != nil && f.Go.Version != "" { rawGoVersion.LoadOrStore(m, f.Go.Version) - summary.goVersionV = "v" + f.Go.Version + summary.goVersion = f.Go.Version + summary.depth = modDepthFromGoVersion(f.Go.Version) + } else { + summary.depth = eager } if len(f.Require) > 0 { summary.require = make([]module.Version, 0, len(f.Require)) @@ -589,3 +655,47 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { } var rawGoModSummaryCache par.Cache // module.Version → rawGoModSummary result + +// queryLatestVersionIgnoringRetractions looks up the latest version of the +// module with the given path without considering retracted or excluded +// versions. +// +// If all versions of the module are replaced, +// queryLatestVersionIgnoringRetractions returns the replacement without making +// a query. +// +// If the queried latest version is replaced, +// queryLatestVersionIgnoringRetractions returns the replacement. +func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (latest module.Version, err error) { + type entry struct { + latest module.Version + err error + } + e := latestVersionIgnoringRetractionsCache.Do(path, func() interface{} { + ctx, span := trace.StartSpan(ctx, "queryLatestVersionIgnoringRetractions "+path) + defer span.Done() + + if repl := Replacement(module.Version{Path: path}); repl.Path != "" { + // All versions of the module were replaced. + // No need to query. + return &entry{latest: repl} + } + + // Find the latest version of the module. + // Ignore exclusions from the main module's go.mod. + const ignoreSelected = "" + var allowAll AllowedFunc + rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll) + if err != nil { + return &entry{err: err} + } + latest := module.Version{Path: path, Version: rev.Version} + if repl := resolveReplacement(latest); repl.Path != "" { + latest = repl + } + return &entry{latest: latest} + }).(*entry) + return e.latest, e.err +} + +var latestVersionIgnoringRetractionsCache par.Cache // path → queryLatestVersionIgnoringRetractions result diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 31015194f9a..87619b4ace6 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -16,17 +16,36 @@ import ( "golang.org/x/mod/semver" ) +// cmpVersion implements the comparison for versions in the module loader. +// +// It is consistent with semver.Compare except that as a special case, +// the version "" is considered higher than all other versions. +// The main module (also known as the target) has no version and must be chosen +// over other versions of the same module in the module dependency graph. +func cmpVersion(v1, v2 string) int { + if v2 == "" { + if v1 == "" { + return 0 + } + return -1 + } + if v1 == "" { + return 1 + } + return semver.Compare(v1, v2) +} + // mvsReqs implements mvs.Reqs for module semantic versions, // with any exclusions or replacements applied internally. 
type mvsReqs struct { - buildList []module.Version + roots []module.Version } func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { if mod == Target { // Use the build list as it existed when r was constructed, not the current // global build list. - return r.buildList[1:], nil + return r.roots, nil } if mod.Version == "none" { @@ -47,7 +66,7 @@ func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { // be chosen over other versions of the same module in the module dependency // graph. func (*mvsReqs) Max(v1, v2 string) string { - if v1 != "" && (v2 == "" || semver.Compare(v1, v2) == -1) { + if cmpVersion(v1, v2) < 0 { return v2 } return v1 @@ -86,12 +105,12 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string, return versions, err } -// Previous returns the tagged version of m.Path immediately prior to +// previousVersion returns the tagged version of m.Path immediately prior to // m.Version, or version "none" if no prior version is tagged. // // Since the version of Target is not found in the version list, // it has no previous version. -func (*mvsReqs) Previous(m module.Version) (module.Version, error) { +func previousVersion(m module.Version) (module.Version, error) { // TODO(golang.org/issue/38714): thread tracing context through MVS. if m == Target { @@ -111,3 +130,7 @@ func (*mvsReqs) Previous(m module.Version) (module.Version, error) { } return module.Version{Path: m.Path, Version: "none"}, nil } + +func (*mvsReqs) Previous(m module.Version) (module.Version, error) { + return previousVersion(m) +} diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index 8affd179bb2..6f6c6e8c98d 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -177,7 +177,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return nil, err } - if (query == "upgrade" || query == "patch") && modfetch.IsPseudoVersion(current) && !rev.Time.IsZero() { + if (query == "upgrade" || query == "patch") && module.IsPseudoVersion(current) && !rev.Time.IsZero() { // Don't allow "upgrade" or "patch" to move from a pseudo-version // to a chronologically older version or pseudo-version. // @@ -196,7 +196,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed // newer but v1.1.0 is still an “upgrade”; or v1.0.2 might be a revert of // an unsuccessful fix in v1.0.1, in which case the v1.0.2 commit may be // older than the v1.0.1 commit despite the tag itself being newer.) 
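The cmpVersion/Max pair above encodes a quirk worth calling out: the main module has the empty version "", and it must win against any real version. A quick demonstration, with cmpVersion copied from the diff:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// cmpVersion is copied from the diff above: "" (the main module) is treated
// as higher than every real version.
func cmpVersion(v1, v2 string) int {
	if v2 == "" {
		if v1 == "" {
			return 0
		}
		return -1
	}
	if v1 == "" {
		return 1
	}
	return semver.Compare(v1, v2)
}

func maxVersion(v1, v2 string) string {
	if cmpVersion(v1, v2) < 0 {
		return v2
	}
	return v1
}

func main() {
	fmt.Printf("%q\n", maxVersion("v1.0.0", "v1.2.3")) // "v1.2.3"
	fmt.Printf("%q\n", maxVersion("v1.2.3", ""))       // "": the main module wins
}
```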
- currentTime, err := modfetch.PseudoVersionTime(current) + currentTime, err := module.PseudoVersionTime(current) if err == nil && rev.Time.Before(currentTime) { if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) { return nil, err @@ -325,18 +325,18 @@ func newQueryMatcher(path string, query, current string, allowed AllowedFunc) (* if current == "" || current == "none" { qm.mayUseLatest = true } else { - qm.mayUseLatest = modfetch.IsPseudoVersion(current) + qm.mayUseLatest = module.IsPseudoVersion(current) qm.filter = func(mv string) bool { return semver.Compare(mv, current) >= 0 } } case query == "patch": - if current == "none" { + if current == "" || current == "none" { return nil, &NoPatchBaseError{path} } if current == "" { qm.mayUseLatest = true } else { - qm.mayUseLatest = modfetch.IsPseudoVersion(current) + qm.mayUseLatest = module.IsPseudoVersion(current) qm.prefix = semver.MajorMinor(current) + "." qm.filter = func(mv string) bool { return semver.Compare(mv, current) >= 0 } } @@ -695,7 +695,9 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin // modulePrefixesExcludingTarget returns all prefixes of path that may plausibly // exist as a module, excluding targetPrefix but otherwise including path -// itself, sorted by descending length. +// itself, sorted by descending length. Prefixes that are not valid module paths +// but are valid package paths (like "m" or "example.com/.gen") are included, +// since they might be replaced. func modulePrefixesExcludingTarget(path string) []string { prefixes := make([]string, 0, strings.Count(path, "/")+1) @@ -747,6 +749,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod noPackage *PackageNotInModuleError noVersion *NoMatchingVersionError noPatchBase *NoPatchBaseError + invalidPath *module.InvalidPathError // see comment in case below notExistErr error ) for _, r := range results { @@ -767,6 +770,17 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod if noPatchBase == nil { noPatchBase = rErr } + case *module.InvalidPathError: + // The prefix was not a valid module path, and there was no replacement. + // Prefixes like this may appear in candidateModules, since we handle + // replaced modules that weren't required in the repo lookup process + // (see lookupRepo). + // + // A shorter prefix may be a valid module path and may contain a valid + // import path, so this is a low-priority error. 
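These hunks move the pseudo-version helpers from the internal modfetch package to their exported home in golang.org/x/mod/module. A small sketch of the new call sites; the pseudo-version string below is made up:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/module"
)

func main() {
	v := "v0.0.0-20210101000000-0123456789ab" // invented pseudo-version

	fmt.Println(module.IsPseudoVersion(v)) // true

	t, err := module.PseudoVersionTime(v)
	if err != nil {
		log.Fatal(err)
	}
	rev, err := module.PseudoVersionRev(v)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.UTC(), rev) // 2021-01-01 00:00:00 +0000 UTC 0123456789ab
}
```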
+ if invalidPath == nil { + invalidPath = rErr + } default: if errors.Is(rErr, fs.ErrNotExist) { if notExistErr == nil { @@ -800,6 +814,8 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod err = noVersion case noPatchBase != nil: err = noPatchBase + case invalidPath != nil: + err = invalidPath case notExistErr != nil: err = notExistErr default: @@ -993,7 +1009,7 @@ func (rr *replacementRepo) Versions(prefix string) ([]string, error) { if index != nil && len(index.replace) > 0 { path := rr.ModulePath() for m, _ := range index.replace { - if m.Path == path && strings.HasPrefix(m.Version, prefix) && m.Version != "" && !modfetch.IsPseudoVersion(m.Version) { + if m.Path == path && strings.HasPrefix(m.Version, prefix) && m.Version != "" && !module.IsPseudoVersion(m.Version) { versions = append(versions, m.Version) } } @@ -1050,9 +1066,9 @@ func (rr *replacementRepo) Latest() (*modfetch.RevInfo, error) { // used from within some other module, the user will be able to upgrade // the requirement to any real version they choose. if _, pathMajor, ok := module.SplitPathVersion(path); ok && len(pathMajor) > 0 { - v = modfetch.PseudoVersion(pathMajor[1:], "", time.Time{}, "000000000000") + v = module.PseudoVersion(pathMajor[1:], "", time.Time{}, "000000000000") } else { - v = modfetch.PseudoVersion("v0", "", time.Time{}, "000000000000") + v = module.PseudoVersion("v0", "", time.Time{}, "000000000000") } } @@ -1067,9 +1083,9 @@ func (rr *replacementRepo) Latest() (*modfetch.RevInfo, error) { func (rr *replacementRepo) replacementStat(v string) (*modfetch.RevInfo, error) { rev := &modfetch.RevInfo{Version: v} - if modfetch.IsPseudoVersion(v) { - rev.Time, _ = modfetch.PseudoVersionTime(v) - rev.Short, _ = modfetch.PseudoVersionRev(v) + if module.IsPseudoVersion(v) { + rev.Time, _ = module.PseudoVersionTime(v) + rev.Short, _ = module.PseudoVersionRev(v) } return rev, nil } diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go index e225a0e71e7..a3f2f84505a 100644 --- a/src/cmd/go/internal/modload/query_test.go +++ b/src/cmd/go/internal/modload/query_test.go @@ -106,7 +106,7 @@ var queryTests = []struct { {path: queryRepo, query: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"}, {path: queryRepo, query: "ed5ffdaa", vers: "v1.9.10-pre2.0.20191220134614-ed5ffdaa1f5e"}, - // golang.org/issue/29262: The major version for for a module without a suffix + // golang.org/issue/29262: The major version for a module without a suffix // should be based on the most recent tag (v1 as appropriate, not v0 // unconditionally). 
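The modfetch-to-module renames running through these hunks track the move of the pseudo-version helpers into golang.org/x/mod/module, where they are exported. A small demonstration of the relocated API (signatures as published in x/mod at the version vendored here):

package main

import (
	"fmt"
	"time"

	"golang.org/x/mod/module"
)

func main() {
	// The same placeholder pseudo-version the replacementRepo hunk constructs.
	v := module.PseudoVersion("v0", "", time.Time{}, "000000000000")
	fmt.Println(v)                         // v0.0.0-00010101000000-000000000000
	fmt.Println(module.IsPseudoVersion(v)) // true

	t, _ := module.PseudoVersionTime(v)
	fmt.Println(t.UTC()) // 0001-01-01 00:00:00 +0000 UTC

	rev, _ := module.PseudoVersionRev(v)
	fmt.Println(rev) // 000000000000
}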
{path: queryRepo, query: "42abcb6df8ee", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"}, @@ -122,7 +122,7 @@ var queryTests = []struct { {path: queryRepo, query: "upgrade", allow: "NOMATCH", err: `no matching versions for query "upgrade"`}, {path: queryRepo, query: "upgrade", current: "v1.9.9", allow: "NOMATCH", err: `vcs-test.golang.org/git/querytest.git@v1.9.9: disallowed module version`}, {path: queryRepo, query: "upgrade", current: "v1.99.99", err: `vcs-test.golang.org/git/querytest.git@v1.99.99: invalid version: unknown revision v1.99.99`}, - {path: queryRepo, query: "patch", current: "", vers: "v1.9.9"}, + {path: queryRepo, query: "patch", current: "", err: `can't query version "patch" of module vcs-test.golang.org/git/querytest.git: no existing version is required`}, {path: queryRepo, query: "patch", current: "v0.1.0", vers: "v0.1.2"}, {path: queryRepo, query: "patch", current: "v1.9.0", vers: "v1.9.9"}, {path: queryRepo, query: "patch", current: "v1.9.10-pre1", vers: "v1.9.10-pre1"}, diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go index 1fe742dc97d..658fc6f55a9 100644 --- a/src/cmd/go/internal/modload/search.go +++ b/src/cmd/go/internal/modload/search.go @@ -86,7 +86,7 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f } if !fi.IsDir() { - if fi.Mode()&fs.ModeSymlink != 0 && want { + if fi.Mode()&fs.ModeSymlink != 0 && want && strings.Contains(m.Pattern(), "...") { if target, err := fsys.Stat(path); err == nil && target.IsDir() { fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path) } @@ -187,7 +187,7 @@ func MatchInModule(ctx context.Context, pattern string, m module.Version, tags m matchPackages(ctx, match, tags, includeStd, nil) } - LoadModFile(ctx) + LoadModFile(ctx) // Sets Target, needed by fetch and matchPackages. if !match.IsLiteral() { matchPackages(ctx, match, tags, omitStd, []module.Version{m}) diff --git a/src/cmd/go/internal/modload/stat_openfile.go b/src/cmd/go/internal/modload/stat_openfile.go index 5842b858f0b..368f8931984 100644 --- a/src/cmd/go/internal/modload/stat_openfile.go +++ b/src/cmd/go/internal/modload/stat_openfile.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (js && wasm) || plan9 // +build js,wasm plan9 // On plan9, per http://9p.io/magic/man2html/2/access: “Since file permissions diff --git a/src/cmd/go/internal/modload/stat_unix.go b/src/cmd/go/internal/modload/stat_unix.go index f49278ec3a8..e079d739902 100644 --- a/src/cmd/go/internal/modload/stat_unix.go +++ b/src/cmd/go/internal/modload/stat_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package modload diff --git a/src/cmd/go/internal/modload/stat_windows.go b/src/cmd/go/internal/modload/stat_windows.go index 0ac23913475..825e60b27af 100644 --- a/src/cmd/go/internal/modload/stat_windows.go +++ b/src/cmd/go/internal/modload/stat_windows.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
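The build-constraint hunks in this file and its neighbors follow the Go 1.17 migration pattern: a //go:build expression is added above each legacy // +build line, and the two must agree. In the legacy syntax, a comma within a line means AND, a space means OR, and separate lines are ANDed, so for example:

//go:build (js && wasm) || plan9
// +build js,wasm plan9

// Package constraintdemo is a placeholder: a file carrying the two
// equivalent constraint lines above compiles only on js/wasm or on plan9.
package constraintdemo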
+//go:build windows // +build windows package modload diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go index d8fd91f1fea..80713b0812e 100644 --- a/src/cmd/go/internal/modload/vendor.go +++ b/src/cmd/go/internal/modload/vendor.go @@ -31,6 +31,7 @@ var ( type vendorMetadata struct { Explicit bool Replacement module.Version + GoVersion string } // readVendorList reads the list of vendored modules from vendor/modules.txt. @@ -104,6 +105,10 @@ func readVendorList() { if entry == "explicit" { meta.Explicit = true } + if strings.HasPrefix(entry, "go ") { + meta.GoVersion = strings.TrimPrefix(entry, "go ") + rawGoVersion.Store(mod, meta.GoVersion) + } // All other tokens are reserved for future use. } vendorMeta[mod] = meta diff --git a/src/cmd/go/internal/mvs/errors.go b/src/cmd/go/internal/mvs/errors.go index 5564965fb51..bf183cea9e8 100644 --- a/src/cmd/go/internal/mvs/errors.go +++ b/src/cmd/go/internal/mvs/errors.go @@ -31,13 +31,15 @@ type buildListErrorElem struct { // occurred at a module found along the given path of requirements and/or // upgrades, which must be non-empty. // -// The isUpgrade function reports whether a path step is due to an upgrade. -// A nil isUpgrade function indicates that none of the path steps are due to upgrades. -func NewBuildListError(err error, path []module.Version, isUpgrade func(from, to module.Version) bool) *BuildListError { +// The isVersionChange function reports whether a path step is due to an +// explicit upgrade or downgrade (as opposed to an existing requirement in a +// go.mod file). A nil isVersionChange function indicates that none of the path +// steps are due to explicit version changes. +func NewBuildListError(err error, path []module.Version, isVersionChange func(from, to module.Version) bool) *BuildListError { stack := make([]buildListErrorElem, 0, len(path)) for len(path) > 1 { reason := "requires" - if isUpgrade != nil && isUpgrade(path[0], path[1]) { + if isVersionChange != nil && isVersionChange(path[0], path[1]) { reason = "updating to" } stack = append(stack, buildListErrorElem{ diff --git a/src/cmd/go/internal/mvs/graph.go b/src/cmd/go/internal/mvs/graph.go new file mode 100644 index 00000000000..c5de4866bf4 --- /dev/null +++ b/src/cmd/go/internal/mvs/graph.go @@ -0,0 +1,223 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mvs + +import ( + "fmt" + + "golang.org/x/mod/module" +) + +// Graph implements an incremental version of the MVS algorithm, with the +// requirements pushed by the caller instead of pulled by the MVS traversal. +type Graph struct { + cmp func(v1, v2 string) int + roots []module.Version + + required map[module.Version][]module.Version + + isRoot map[module.Version]bool // contains true for roots and false for reachable non-roots + selected map[string]string // path → version +} + +// NewGraph returns an incremental MVS graph containing only a set of root +// dependencies and using the given max function for version strings. +// +// The caller must ensure that the root slice is not modified while the Graph +// may be in use. 
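Stepping back briefly to the vendor.go hunk above: the new GoVersion field is parsed out of the metadata line in vendor/modules.txt. A self-contained sketch of that parsing, assuming the "## explicit; go 1.17" line format (the split-and-trim framing is not shown in the hunk, so it is an assumption here):

package main

import (
	"fmt"
	"strings"
)

// parseVendorAnnotations mirrors the readVendorList change: tokens on a
// "## " metadata line are semicolon-separated; "explicit" and "go <version>"
// are recognized, and all other tokens are reserved for future use.
func parseVendorAnnotations(line string) (explicit bool, goVersion string) {
	for _, entry := range strings.Split(strings.TrimPrefix(line, "## "), ";") {
		entry = strings.TrimSpace(entry)
		if entry == "explicit" {
			explicit = true
		}
		if strings.HasPrefix(entry, "go ") {
			goVersion = strings.TrimPrefix(entry, "go ")
		}
	}
	return explicit, goVersion
}

func main() {
	fmt.Println(parseVendorAnnotations("## explicit; go 1.17")) // true 1.17
}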
+func NewGraph(cmp func(v1, v2 string) int, roots []module.Version) *Graph { + g := &Graph{ + cmp: cmp, + roots: roots[:len(roots):len(roots)], + required: make(map[module.Version][]module.Version), + isRoot: make(map[module.Version]bool), + selected: make(map[string]string), + } + + for _, m := range roots { + g.isRoot[m] = true + if g.cmp(g.Selected(m.Path), m.Version) < 0 { + g.selected[m.Path] = m.Version + } + } + + return g +} + +// Require adds the information that module m requires all modules in reqs. +// The reqs slice must not be modified after it is passed to Require. +// +// m must be reachable by some existing chain of requirements from g's target, +// and Require must not have been called for it already. +// +// If any of the modules in reqs has the same path as g's target, +// the target must have higher precedence than the version in reqs. +func (g *Graph) Require(m module.Version, reqs []module.Version) { + // To help catch disconnected-graph bugs, enforce that all required versions + // are actually reachable from the roots (and therefore should affect the + // selected versions of the modules they name). + if _, reachable := g.isRoot[m]; !reachable { + panic(fmt.Sprintf("%v is not reachable from any root", m)) + } + + // Truncate reqs to its capacity to avoid aliasing bugs if it is later + // returned from RequiredBy and appended to. + reqs = reqs[:len(reqs):len(reqs)] + + if _, dup := g.required[m]; dup { + panic(fmt.Sprintf("requirements of %v have already been set", m)) + } + g.required[m] = reqs + + for _, dep := range reqs { + // Mark dep reachable, regardless of whether it is selected. + if _, ok := g.isRoot[dep]; !ok { + g.isRoot[dep] = false + } + + if g.cmp(g.Selected(dep.Path), dep.Version) < 0 { + g.selected[dep.Path] = dep.Version + } + } +} + +// RequiredBy returns the slice of requirements passed to Require for m, if any, +// with its capacity reduced to its length. +// If Require has not been called for m, RequiredBy(m) returns ok=false. +// +// The caller must not modify the returned slice, but may safely append to it +// and may rely on it not to be modified. +func (g *Graph) RequiredBy(m module.Version) (reqs []module.Version, ok bool) { + reqs, ok = g.required[m] + return reqs, ok +} + +// Selected returns the selected version of the given module path. +// +// If no version is selected, Selected returns version "none". +func (g *Graph) Selected(path string) (version string) { + v, ok := g.selected[path] + if !ok { + return "none" + } + return v +} + +// BuildList returns the selected versions of all modules present in the Graph, +// beginning with the selected versions of each module path in the roots of g. +// +// The order of the remaining elements in the list is deterministic +// but arbitrary. +func (g *Graph) BuildList() []module.Version { + seenRoot := make(map[string]bool, len(g.roots)) + + var list []module.Version + for _, r := range g.roots { + if seenRoot[r.Path] { + // Multiple copies of the same root, with the same or different versions, + // are a bit of a degenerate case: we will take the transitive + // requirements of both roots into account, but only the higher one can + // possibly be selected. However — especially given that we need the + // seenRoot map for later anyway — it is simpler to support this + // degenerate case than to forbid it.
+ continue + } + + if v := g.Selected(r.Path); v != "none" { + list = append(list, module.Version{Path: r.Path, Version: v}) + } + seenRoot[r.Path] = true + } + uniqueRoots := list + + for path, version := range g.selected { + if !seenRoot[path] { + list = append(list, module.Version{Path: path, Version: version}) + } + } + module.Sort(list[len(uniqueRoots):]) + + return list +} + +// WalkBreadthFirst invokes f once, in breadth-first order, for each module +// version other than "none" that appears in the graph, regardless of whether +// that version is selected. +func (g *Graph) WalkBreadthFirst(f func(m module.Version)) { + var queue []module.Version + enqueued := make(map[module.Version]bool) + for _, m := range g.roots { + if m.Version != "none" { + queue = append(queue, m) + enqueued[m] = true + } + } + + for len(queue) > 0 { + m := queue[0] + queue = queue[1:] + + f(m) + + reqs, _ := g.RequiredBy(m) + for _, r := range reqs { + if !enqueued[r] && r.Version != "none" { + queue = append(queue, r) + enqueued[r] = true + } + } + } +} + +// FindPath reports a shortest requirement path starting at one of the roots of +// the graph and ending at a module version m for which f(m) returns true, or +// nil if no such path exists. +func (g *Graph) FindPath(f func(module.Version) bool) []module.Version { + // firstRequires[a] = b means that in a breadth-first traversal of the + // requirement graph, the module version a was first required by b. + firstRequires := make(map[module.Version]module.Version) + + queue := g.roots + for _, m := range g.roots { + firstRequires[m] = module.Version{} + } + + for len(queue) > 0 { + m := queue[0] + queue = queue[1:] + + if f(m) { + // Construct the path reversed (because we're starting from the far + // endpoint), then reverse it. + path := []module.Version{m} + for { + m = firstRequires[m] + if m.Path == "" { + break + } + path = append(path, m) + } + + i, j := 0, len(path)-1 + for i < j { + path[i], path[j] = path[j], path[i] + i++ + j-- + } + + return path + } + + reqs, _ := g.RequiredBy(m) + for _, r := range reqs { + if _, seen := firstRequires[r]; !seen { + queue = append(queue, r) + firstRequires[r] = m + } + } + } + + return nil +} diff --git a/src/cmd/go/internal/mvs/mvs.go b/src/cmd/go/internal/mvs/mvs.go index ff2c5f963cd..6969f90f2e6 100644 --- a/src/cmd/go/internal/mvs/mvs.go +++ b/src/cmd/go/internal/mvs/mvs.go @@ -10,7 +10,6 @@ import ( "fmt" "sort" "sync" - "sync/atomic" "cmd/go/internal/par" @@ -91,151 +90,91 @@ func BuildList(target module.Version, reqs Reqs) ([]module.Version, error) { } func buildList(target module.Version, reqs Reqs, upgrade func(module.Version) (module.Version, error)) ([]module.Version, error) { - // Explore work graph in parallel in case reqs.Required - // does high-latency network operations. 
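With the whole Graph API now in view (NewGraph, Require, RequiredBy, Selected, BuildList, WalkBreadthFirst, FindPath), its push-style contract can be pinned down with a test. The sketch below is hypothetical, not part of this patch, but could sit alongside graph.go in package mvs:

package mvs

import (
	"testing"

	"golang.org/x/mod/module"
	"golang.org/x/mod/semver"
)

// TestGraphSketch: the caller pushes each reachable version's requirements
// exactly once, and the graph tracks the maximum selected version per path.
func TestGraphSketch(t *testing.T) {
	root := module.Version{Path: "example.com/root", Version: "v1.0.0"}
	g := NewGraph(semver.Compare, []module.Version{root})

	g.Require(root, []module.Version{
		{Path: "example.com/a", Version: "v1.1.0"},
		{Path: "example.com/b", Version: "v1.0.0"},
	})
	g.Require(module.Version{Path: "example.com/a", Version: "v1.1.0"},
		[]module.Version{{Path: "example.com/b", Version: "v1.2.0"}})
	g.Require(module.Version{Path: "example.com/b", Version: "v1.0.0"}, nil)
	g.Require(module.Version{Path: "example.com/b", Version: "v1.2.0"}, nil)

	// b was reached at both v1.0.0 and v1.2.0; the higher version wins.
	if got := g.Selected("example.com/b"); got != "v1.2.0" {
		t.Errorf("Selected(b) = %q, want v1.2.0", got)
	}
	// BuildList begins with the root, then the other selected versions.
	if got := g.BuildList(); got[0] != root {
		t.Errorf("BuildList()[0] = %v, want %v", got[0], root)
	}
}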
- type modGraphNode struct { - m module.Version - required []module.Version - upgrade module.Version - err error - } - var ( - mu sync.Mutex - modGraph = map[module.Version]*modGraphNode{} - min = map[string]string{} // maps module path to minimum required version - haveErr int32 - ) - setErr := func(n *modGraphNode, err error) { - n.err = err - atomic.StoreInt32(&haveErr, 1) + cmp := func(v1, v2 string) int { + if reqs.Max(v1, v2) != v1 { + return -1 + } + if reqs.Max(v2, v1) != v2 { + return 1 + } + return 0 } + var ( + mu sync.Mutex + g = NewGraph(cmp, []module.Version{target}) + upgrades = map[module.Version]module.Version{} + errs = map[module.Version]error{} // (non-nil errors only) + ) + + // Explore work graph in parallel in case reqs.Required + // does high-latency network operations. var work par.Work work.Add(target) work.Do(10, func(item interface{}) { m := item.(module.Version) - node := &modGraphNode{m: m} - mu.Lock() - modGraph[m] = node + var required []module.Version + var err error if m.Version != "none" { - if v, ok := min[m.Path]; !ok || reqs.Max(v, m.Version) != v { - min[m.Path] = m.Version + required, err = reqs.Required(m) + } + + u := m + if upgrade != nil { + upgradeTo, upErr := upgrade(m) + if upErr == nil { + u = upgradeTo + } else if err == nil { + err = upErr } } + + mu.Lock() + if err != nil { + errs[m] = err + } + if u != m { + upgrades[m] = u + required = append([]module.Version{u}, required...) + } + g.Require(m, required) mu.Unlock() - if m.Version != "none" { - required, err := reqs.Required(m) - if err != nil { - setErr(node, err) - return - } - node.required = required - for _, r := range node.required { - work.Add(r) - } - } - - if upgrade != nil { - u, err := upgrade(m) - if err != nil { - setErr(node, err) - return - } - if u != m { - node.upgrade = u - work.Add(u) - } + for _, r := range required { + work.Add(r) } }) // If there was an error, find the shortest path from the target to the // node where the error occurred so we can report a useful error message. - if haveErr != 0 { - // neededBy[a] = b means a was added to the module graph by b. - neededBy := make(map[*modGraphNode]*modGraphNode) - q := make([]*modGraphNode, 0, len(modGraph)) - q = append(q, modGraph[target]) - for len(q) > 0 { - node := q[0] - q = q[1:] - - if node.err != nil { - pathUpgrade := map[module.Version]module.Version{} - - // Construct the error path reversed (from the error to the main module), - // then reverse it to obtain the usual order (from the main module to - // the error). 
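The cmp closure at the top of the rewritten buildList adapts the legacy Reqs.Max method to the three-way comparison the new Graph expects. A standalone illustration of why the adapter works, using the old mvsReqs.Max logic from earlier in this patch as the max function:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// maxVersion reproduces the pre-change mvsReqs.Max shown earlier: semver
// ordering, with "" (the main module) treated as highest.
func maxVersion(v1, v2 string) string {
	if v1 != "" && (v2 == "" || semver.Compare(v1, v2) == -1) {
		return v2
	}
	return v1
}

// cmpFromMax derives a comparison exactly as the rewritten buildList does:
// if max(v1, v2) is not v1, then v1 sorts below v2, and symmetrically.
func cmpFromMax(max func(v1, v2 string) string) func(v1, v2 string) int {
	return func(v1, v2 string) int {
		if max(v1, v2) != v1 {
			return -1
		}
		if max(v2, v1) != v2 {
			return 1
		}
		return 0
	}
}

func main() {
	cmp := cmpFromMax(maxVersion)
	fmt.Println(cmp("v1.2.0", "v1.10.0")) // -1
	fmt.Println(cmp("", "v99.0.0"))       // 1: "" is the main module
}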
- errPath := []module.Version{node.m} - for n, prev := neededBy[node], node; n != nil; n, prev = neededBy[n], n { - if n.upgrade == prev.m { - pathUpgrade[n.m] = prev.m - } - errPath = append(errPath, n.m) - } - i, j := 0, len(errPath)-1 - for i < j { - errPath[i], errPath[j] = errPath[j], errPath[i] - i++ - j-- - } - - isUpgrade := func(from, to module.Version) bool { - return pathUpgrade[from] == to - } - - return nil, NewBuildListError(node.err, errPath, isUpgrade) - } - - neighbors := node.required - if node.upgrade.Path != "" { - neighbors = append(neighbors, node.upgrade) - } - for _, neighbor := range neighbors { - nn := modGraph[neighbor] - if neededBy[nn] != nil { - continue - } - neededBy[nn] = node - q = append(q, nn) - } + if len(errs) > 0 { + errPath := g.FindPath(func(m module.Version) bool { + return errs[m] != nil + }) + if len(errPath) == 0 { + panic("internal error: could not reconstruct path to module with error") } + + err := errs[errPath[len(errPath)-1]] + isUpgrade := func(from, to module.Version) bool { + if u, ok := upgrades[from]; ok { + return u == to + } + return false + } + return nil, NewBuildListError(err.(error), errPath, isUpgrade) } // The final list is the minimum version of each module found in the graph. - - if v := min[target.Path]; v != target.Version { + list := g.BuildList() + if v := list[0]; v != target { // target.Version will be "" for modload, the main client of MVS. // "" denotes the main module, which has no version. However, MVS treats // version strings as opaque, so "" is not a special value here. // See golang.org/issue/31491, golang.org/issue/29773. - panic(fmt.Sprintf("mistake: chose version %q instead of target %+v", v, target)) // TODO: Don't panic. + panic(fmt.Sprintf("mistake: chose version %q instead of target %+v", v, target)) } - - list := []module.Version{target} - for path, vers := range min { - if path != target.Path { - list = append(list, module.Version{Path: path, Version: vers}) - } - - n := modGraph[module.Version{Path: path, Version: vers}] - required := n.required - for _, r := range required { - if r.Version == "none" { - continue - } - v := min[r.Path] - if r.Path != target.Path && reqs.Max(v, r.Version) != v { - panic(fmt.Sprintf("mistake: version %q does not satisfy requirement %+v", v, r)) // TODO: Don't panic. - } - } - } - - tail := list[1:] - sort.Slice(tail, func(i, j int) bool { - return tail[i].Path < tail[j].Path - }) return list, nil } @@ -492,6 +431,41 @@ List: downgraded = append(downgraded, r) } + // The downgrades we computed above only downgrade to versions enumerated by + // reqs.Previous. However, reqs.Previous omits some versions — such as + // pseudo-versions and retracted versions — that may be selected as transitive + // requirements of other modules. + // + // If one of those requirements pulls the version back up above the version + // identified by reqs.Previous, then the transitive dependencies of that + // initially-downgraded version should no longer matter — in particular, we + // should not add new dependencies on module paths that nothing else in the + // updated module graph even requires. + // + // In order to eliminate those spurious dependencies, we recompute the build + // list with the actual versions of the downgraded modules as selected by MVS, + // instead of our initial downgrades. + // (See the downhiddenartifact and downhiddencross test cases).
+ actual, err := BuildList(target, &override{ + target: target, + list: downgraded, + Reqs: reqs, + }) + if err != nil { + return nil, err + } + actualVersion := make(map[string]string, len(actual)) + for _, m := range actual { + actualVersion[m.Path] = m.Version + } + + downgraded = downgraded[:0] + for _, m := range list { + if v, ok := actualVersion[m.Path]; ok { + downgraded = append(downgraded, module.Version{Path: m.Path, Version: v}) + } + } + return BuildList(target, &override{ target: target, list: downgraded, diff --git a/src/cmd/go/internal/mvs/mvs_test.go b/src/cmd/go/internal/mvs/mvs_test.go index 742e396e0dc..598ed666889 100644 --- a/src/cmd/go/internal/mvs/mvs_test.go +++ b/src/cmd/go/internal/mvs/mvs_test.go @@ -275,6 +275,60 @@ B1: build A: A B2 downgrade A B1: A B1 +# Both B3 and C2 require D2. +# If we downgrade D to D1, then in isolation B3 would downgrade to B1, +# because B2 is hidden — B1 is the next-highest version that is not hidden. +# However, if we downgrade D, we will also downgrade C to C1. +# And C1 requires B2.hidden, and B2.hidden also meets our requirements: +# it is compatible with D1 and a strict downgrade from B3. +# +# Since neither the initial nor the final build list includes B1, +# and nothing in the final downgraded build list requires E at all, +# no dependency on E1 (required by only B1) should be introduced. +# +name: downhiddenartifact +A: B3 C2 +A1: B3 +B1: E1 +B2.hidden: +B3: D2 +C1: B2.hidden +C2: D2 +D1: +D2: +build A1: A1 B3 D2 +downgrade A1 D1: A1 B1 D1 E1 +build A: A B3 C2 D2 +downgrade A D1: A B2.hidden C1 D1 + +# Both B3 and C3 require D2. +# If we downgrade D to D1, then in isolation B3 would downgrade to B1, +# and C3 would downgrade to C1. +# But C1 requires B2.hidden, and B1 requires C2.hidden, so we can't +# downgrade to either of those without pulling the other back up a little. +# +# B2.hidden and C2.hidden are both compatible with D1, so that still +# meets our requirements — but then we're in an odd state in which +# B and C have both been downgraded to hidden versions, without any +# remaining requirements to explain how those hidden versions got there. +# +# TODO(bcmills): Would it be better to force downgrades to land on non-hidden +# versions? +# In this case, that would remove the dependencies on B and C entirely. +# +name: downhiddencross +A: B3 C3 +B1: C2.hidden +B2.hidden: +B3: D2 +C1: B2.hidden +C2.hidden: +C3: D2 +D1: +D2: +build A: A B3 C3 D2 +downgrade A D1: A B2.hidden C2.hidden D1 + # golang.org/issue/25542. name: noprev1 A: B4 C2 diff --git a/src/cmd/go/internal/renameio/renameio.go b/src/cmd/go/internal/renameio/renameio.go deleted file mode 100644 index 9788171d6e2..00000000000 --- a/src/cmd/go/internal/renameio/renameio.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package renameio writes files atomically by renaming temporary files. -package renameio - -import ( - "bytes" - "io" - "io/fs" - "math/rand" - "os" - "path/filepath" - "strconv" - - "cmd/go/internal/robustio" -) - -const patternSuffix = ".tmp" - -// Pattern returns a glob pattern that matches the unrenamed temporary files -// created when writing to filename.
-func Pattern(filename string) string { - return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) -} - -// WriteFile is like os.WriteFile, but first writes data to an arbitrary -// file in the same directory as filename, then renames it atomically to the -// final name. -// -// That ensures that the final location, if it exists, is always a complete file. -func WriteFile(filename string, data []byte, perm fs.FileMode) (err error) { - return WriteToFile(filename, bytes.NewReader(data), perm) -} - -// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader -// instead of a slice. -func WriteToFile(filename string, data io.Reader, perm fs.FileMode) (err error) { - f, err := tempFile(filepath.Dir(filename), filepath.Base(filename), perm) - if err != nil { - return err - } - defer func() { - // Only call os.Remove on f.Name() if we failed to rename it: otherwise, - // some other process may have created a new file with the same name after - // that. - if err != nil { - f.Close() - os.Remove(f.Name()) - } - }() - - if _, err := io.Copy(f, data); err != nil { - return err - } - // Sync the file before renaming it: otherwise, after a crash the reader may - // observe a 0-length file instead of the actual contents. - // See https://golang.org/issue/22397#issuecomment-380831736. - if err := f.Sync(); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - - return robustio.Rename(f.Name(), filename) -} - -// ReadFile is like os.ReadFile, but on Windows retries spurious errors that -// may occur if the file is concurrently replaced. -// -// Errors are classified heuristically and retries are bounded, so even this -// function may occasionally return a spurious error on Windows. -// If so, the error will likely wrap one of: -// - syscall.ERROR_ACCESS_DENIED -// - syscall.ERROR_FILE_NOT_FOUND -// - internal/syscall/windows.ERROR_SHARING_VIOLATION -func ReadFile(filename string) ([]byte, error) { - return robustio.ReadFile(filename) -} - -// tempFile creates a new temporary file with given permission bits. -func tempFile(dir, prefix string, perm fs.FileMode) (f *os.File, err error) { - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+patternSuffix) - f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) - if os.IsExist(err) { - continue - } - break - } - return -} diff --git a/src/cmd/go/internal/renameio/renameio_test.go b/src/cmd/go/internal/renameio/renameio_test.go deleted file mode 100644 index 5b2ed836242..00000000000 --- a/src/cmd/go/internal/renameio/renameio_test.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
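The deleted renameio package is easiest to remember by its core pattern: write to a temporary file in the destination directory, sync, close, then rename over the final path so readers never observe a partial file. A condensed standalone sketch of that pattern (the function name is made up, and the retry-on-Windows behavior via robustio is omitted; note too that CreateTemp plus Chmod bypasses the umask, which the original, opening the file with perm directly, did not):

package main

import (
	"io/fs"
	"os"
	"path/filepath"
)

// atomicWrite condenses the deleted WriteToFile: temp file in the target's
// directory, fsync before rename so a crash cannot leave a zero-length file
// at the final path (see golang.org/issue/22397), then an atomic rename.
func atomicWrite(filename string, data []byte, perm fs.FileMode) (err error) {
	f, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)+".*.tmp")
	if err != nil {
		return err
	}
	defer func() {
		// Remove the temp file only on failure; on success it has already
		// been renamed, and a new file may exist under the old name.
		if err != nil {
			f.Close()
			os.Remove(f.Name())
		}
	}()
	if err = f.Chmod(perm); err != nil {
		return err
	}
	if _, err = f.Write(data); err != nil {
		return err
	}
	if err = f.Sync(); err != nil {
		return err
	}
	if err = f.Close(); err != nil {
		return err
	}
	return os.Rename(f.Name(), filename)
}

func main() {
	if err := atomicWrite("out.txt", []byte("hello\n"), 0o666); err != nil {
		panic(err)
	}
}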
- -// +build !plan9 - -package renameio - -import ( - "encoding/binary" - "errors" - "internal/testenv" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "sync/atomic" - "syscall" - "testing" - "time" - - "cmd/go/internal/robustio" -) - -func TestConcurrentReadsAndWrites(t *testing.T) { - if runtime.GOOS == "darwin" && strings.HasSuffix(testenv.Builder(), "-10_14") { - testenv.SkipFlaky(t, 33041) - } - - dir, err := os.MkdirTemp("", "renameio") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - path := filepath.Join(dir, "blob.bin") - - const chunkWords = 8 << 10 - buf := make([]byte, 2*chunkWords*8) - for i := uint64(0); i < 2*chunkWords; i++ { - binary.LittleEndian.PutUint64(buf[i*8:], i) - } - - var attempts int64 = 128 - if !testing.Short() { - attempts *= 16 - } - const parallel = 32 - - var sem = make(chan bool, parallel) - - var ( - writeSuccesses, readSuccesses int64 // atomic - writeErrnoSeen, readErrnoSeen sync.Map - ) - - for n := attempts; n > 0; n-- { - sem <- true - go func() { - defer func() { <-sem }() - - time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond) - offset := rand.Intn(chunkWords) - chunk := buf[offset*8 : (offset+chunkWords)*8] - if err := WriteFile(path, chunk, 0666); err == nil { - atomic.AddInt64(&writeSuccesses, 1) - } else if robustio.IsEphemeralError(err) { - var ( - errno syscall.Errno - dup bool - ) - if errors.As(err, &errno) { - _, dup = writeErrnoSeen.LoadOrStore(errno, true) - } - if !dup { - t.Logf("ephemeral error: %v", err) - } - } else { - t.Errorf("unexpected error: %v", err) - } - - time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond) - data, err := ReadFile(path) - if err == nil { - atomic.AddInt64(&readSuccesses, 1) - } else if robustio.IsEphemeralError(err) { - var ( - errno syscall.Errno - dup bool - ) - if errors.As(err, &errno) { - _, dup = readErrnoSeen.LoadOrStore(errno, true) - } - if !dup { - t.Logf("ephemeral error: %v", err) - } - return - } else { - t.Errorf("unexpected error: %v", err) - return - } - - if len(data) != 8*chunkWords { - t.Errorf("read %d bytes, but each write is a %d-byte file", len(data), 8*chunkWords) - return - } - - u := binary.LittleEndian.Uint64(data) - for i := 1; i < chunkWords; i++ { - next := binary.LittleEndian.Uint64(data[i*8:]) - if next != u+1 { - t.Errorf("wrote sequential integers, but read integer out of sequence at offset %d", i) - return - } - u = next - } - }() - } - - for n := parallel; n > 0; n-- { - sem <- true - } - - var minWriteSuccesses int64 = attempts - if runtime.GOOS == "windows" { - // Windows produces frequent "Access is denied" errors under heavy rename load. - // As long as those are the only errors and *some* of the writes succeed, we're happy. - minWriteSuccesses = attempts / 4 - } - - if writeSuccesses < minWriteSuccesses { - t.Errorf("%d (of %d) writes succeeded; want ≥ %d", writeSuccesses, attempts, minWriteSuccesses) - } else { - t.Logf("%d (of %d) writes succeeded (ok: ≥ %d)", writeSuccesses, attempts, minWriteSuccesses) - } - - var minReadSuccesses int64 = attempts - - switch runtime.GOOS { - case "windows": - // Windows produces frequent "Access is denied" errors under heavy rename load. - // As long as those are the only errors and *some* of the reads succeed, we're happy. - minReadSuccesses = attempts / 4 - - case "darwin", "ios": - // The filesystem on certain versions of macOS (10.14) and iOS (affected - // versions TBD) occasionally fail with "no such file or directory" errors. 
- // See https://golang.org/issue/33041 and https://golang.org/issue/42066. - // The flake rate is fairly low, so ensure that at least 75% of attempts - // succeed. - minReadSuccesses = attempts - (attempts / 4) - } - - if readSuccesses < minReadSuccesses { - t.Errorf("%d (of %d) reads succeeded; want ≥ %d", readSuccesses, attempts, minReadSuccesses) - } else { - t.Logf("%d (of %d) reads succeeded (ok: ≥ %d)", readSuccesses, attempts, minReadSuccesses) - } -} diff --git a/src/cmd/go/internal/renameio/umask_test.go b/src/cmd/go/internal/renameio/umask_test.go deleted file mode 100644 index 65e4fa587b7..00000000000 --- a/src/cmd/go/internal/renameio/umask_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!windows,!js - -package renameio - -import ( - "io/fs" - "os" - "path/filepath" - "syscall" - "testing" -) - -func TestWriteFileModeAppliesUmask(t *testing.T) { - dir, err := os.MkdirTemp("", "renameio") - if err != nil { - t.Fatalf("Failed to create temporary directory: %v", err) - } - defer os.RemoveAll(dir) - - const mode = 0644 - const umask = 0007 - defer syscall.Umask(syscall.Umask(umask)) - - file := filepath.Join(dir, "testWrite") - err = WriteFile(file, []byte("go-build"), mode) - if err != nil { - t.Fatalf("Failed to write file: %v", err) - } - - fi, err := os.Stat(file) - if err != nil { - t.Fatalf("Stat %q (looking for mode %#o): %s", file, mode, err) - } - - if fi.Mode()&fs.ModePerm != 0640 { - t.Errorf("Stat %q: mode %#o want %#o", file, fi.Mode()&fs.ModePerm, 0640) - } -} diff --git a/src/cmd/go/internal/robustio/robustio_flaky.go b/src/cmd/go/internal/robustio/robustio_flaky.go index 5bd44bd3453..d5c241857b4 100644 --- a/src/cmd/go/internal/robustio/robustio_flaky.go +++ b/src/cmd/go/internal/robustio/robustio_flaky.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows || darwin // +build windows darwin package robustio diff --git a/src/cmd/go/internal/robustio/robustio_other.go b/src/cmd/go/internal/robustio/robustio_other.go index 6fe7b7e4e4e..3a20cac6cf8 100644 --- a/src/cmd/go/internal/robustio/robustio_other.go +++ b/src/cmd/go/internal/robustio/robustio_other.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !windows && !darwin // +build !windows,!darwin package robustio diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index 99578b244c8..784f7162dfd 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -8,13 +8,16 @@ package run import ( "context" "fmt" + "go/build" "os" "path" + "path/filepath" "strings" "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/load" + "cmd/go/internal/modload" "cmd/go/internal/str" "cmd/go/internal/work" ) @@ -24,10 +27,21 @@ var CmdRun = &base.Command{ Short: "compile and run Go program", Long: ` Run compiles and runs the named main Go package. -Typically the package is specified as a list of .go source files from a single directory, -but it may also be an import path, file system path, or pattern +Typically the package is specified as a list of .go source files from a single +directory, but it may also be an import path, file system path, or pattern matching a single known package, as in 'go run .' or 'go run my/cmd'. 
+If the package argument has a version suffix (like @latest or @v1.0.0), +"go run" builds the program in module-aware mode, ignoring the go.mod file in +the current directory or any parent directory, if there is one. This is useful +for running programs without affecting the dependencies of the main module. + +If the package argument doesn't have a version suffix, "go run" may run in +module-aware mode or GOPATH mode, depending on the GO111MODULE environment +variable and the presence of a go.mod file. See 'go help modules' for details. +If module-aware mode is enabled, "go run" runs in the context of the main +module. + By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. If the -exec flag is given, 'go run' invokes the binary using xprog: 'xprog a.out arguments...'. @@ -59,14 +73,26 @@ func printStderr(args ...interface{}) (int, error) { } func runRun(ctx context.Context, cmd *base.Command, args []string) { + if shouldUseOutsideModuleMode(args) { + // Set global module flags for 'go run cmd@version'. + // This must be done before modload.Init, but we need to call work.BuildInit + // before loading packages, since it affects package locations, e.g., + // for -race and -msan. + modload.ForceUseModules = true + modload.RootMode = modload.NoRoot + modload.AllowMissingModuleImports() + modload.Init() + } work.BuildInit() var b work.Builder b.Init() b.Print = printStderr + i := 0 for i < len(args) && strings.HasSuffix(args[i], ".go") { i++ } + pkgOpts := load.PackageOpts{MainOnly: true} var p *load.Package if i > 0 { files := args[:i] @@ -77,18 +103,29 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go run: cannot run *_test.go files (%s)", file) } } - p = load.GoFilesPackage(ctx, files) + p = load.GoFilesPackage(ctx, pkgOpts, files) } else if len(args) > 0 && !strings.HasPrefix(args[0], "-") { - pkgs := load.PackagesAndErrors(ctx, args[:1]) + arg := args[0] + var pkgs []*load.Package + if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) { + var err error + pkgs, err = load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args[:1]) + if err != nil { + base.Fatalf("go run: %v", err) + } + } else { + pkgs = load.PackagesAndErrors(ctx, pkgOpts, args[:1]) + } + if len(pkgs) == 0 { - base.Fatalf("go run: no packages loaded from %s", args[0]) + base.Fatalf("go run: no packages loaded from %s", arg) } if len(pkgs) > 1 { var names []string for _, p := range pkgs { names = append(names, p.ImportPath) } - base.Fatalf("go run: pattern %s matches multiple packages:\n\t%s", args[0], strings.Join(names, "\n\t")) + base.Fatalf("go run: pattern %s matches multiple packages:\n\t%s", arg, strings.Join(names, "\n\t")) } p = pkgs[0] i++ @@ -96,28 +133,9 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go run: no go files listed") } cmdArgs := args[i:] - if p.Error != nil { - base.Fatalf("%s", p.Error) - } + load.CheckPackageErrors([]*load.Package{p}) p.Internal.OmitDebug = true - if len(p.DepsErrors) > 0 { - // Since these are errors in dependencies, - // the same error might show up multiple times, - // once in each package that depends on it. - // Only print each once. 
- printed := map[*load.PackageError]bool{} - for _, err := range p.DepsErrors { - if !printed[err] { - printed[err] = true - base.Errorf("%s", err) - } - } - } - base.ExitIfErrors() - if p.Name != "main" { - base.Fatalf("go run: cannot run non-main package") - } p.Target = "" // must build - not up to date if p.Internal.CmdlineFiles { //set executable name if go file is given as cmd-argument @@ -139,11 +157,34 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { } else { p.Internal.ExeName = path.Base(p.ImportPath) } + a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) a := &work.Action{Mode: "go run", Func: buildRunProgram, Args: cmdArgs, Deps: []*work.Action{a1}} b.Do(ctx, a) } +// shouldUseOutsideModuleMode returns whether 'go run' will load packages in +// module-aware mode, ignoring the go.mod file in the current directory. It +// returns true if the first argument contains "@", does not begin with "-" +// (resembling a flag) or end with ".go" (a file). The argument must not be a +// local or absolute file path. +// +// These rules are slightly different than other commands. Whether or not +// 'go run' uses this mode, it interprets arguments ending with ".go" as files +// and uses arguments up to the last ".go" argument to comprise the package. +// If there are no ".go" arguments, only the first argument is interpreted +// as a package path, since there can be only one package. +func shouldUseOutsideModuleMode(args []string) bool { + // NOTE: "@" not allowed in import paths, but it is allowed in non-canonical + // versions. + return len(args) > 0 && + !strings.HasSuffix(args[0], ".go") && + !strings.HasPrefix(args[0], "-") && + strings.Contains(args[0], "@") && + !build.IsLocalImport(args[0]) && + !filepath.IsAbs(args[0]) +} + // buildRunProgram is the action for running a binary that has already // been compiled. We ignore exit status. func buildRunProgram(b *work.Builder, ctx context.Context, a *work.Action) error { diff --git a/src/cmd/go/internal/search/search.go b/src/cmd/go/internal/search/search.go index 18738cf59ec..a0c806a2593 100644 --- a/src/cmd/go/internal/search/search.go +++ b/src/cmd/go/internal/search/search.go @@ -155,7 +155,7 @@ func (m *Match) MatchPackages() { } if !fi.IsDir() { - if fi.Mode()&fs.ModeSymlink != 0 && want { + if fi.Mode()&fs.ModeSymlink != 0 && want && strings.Contains(m.pattern, "...") { if target, err := fsys.Stat(path); err == nil && target.IsDir() { fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path) } @@ -445,7 +445,7 @@ func ImportPathsQuiet(patterns []string) []*Match { for i, dir := range m.Dirs { absDir := dir if !filepath.IsAbs(dir) { - absDir = filepath.Join(base.Cwd, dir) + absDir = filepath.Join(base.Cwd(), dir) } if bp, _ := cfg.BuildContext.ImportDir(absDir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." { m.Pkgs[i] = bp.ImportPath @@ -571,7 +571,6 @@ func IsRelativePath(pattern string) bool { // If so, InDir returns an equivalent path relative to dir. // If not, InDir returns an empty string. // InDir makes some effort to succeed even in the presence of symbolic links. -// TODO(rsc): Replace internal/test.inDir with a call to this function for Go 1.12. 
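The heuristic above is small enough to restate for experimentation outside cmd/go (the helper name is made up; the predicate simply mirrors shouldUseOutsideModuleMode, applied to the first argument):

package main

import (
	"fmt"
	"go/build"
	"path/filepath"
	"strings"
)

// outsideModule reports whether an argument looks like pkg@version: not a
// flag, not a .go file, and not a local or absolute path, but containing "@".
func outsideModule(arg string) bool {
	return !strings.HasSuffix(arg, ".go") &&
		!strings.HasPrefix(arg, "-") &&
		strings.Contains(arg, "@") &&
		!build.IsLocalImport(arg) &&
		!filepath.IsAbs(arg)
}

func main() {
	for _, arg := range []string{
		"example.com/cmd@latest", // true: version suffix, ignores go.mod
		"main.go",                // false: a file
		"-v",                     // false: a flag
		"./tool@v1",              // false: a local import path
	} {
		fmt.Printf("%-26q %v\n", arg, outsideModule(arg))
	}
}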
func InDir(path, dir string) string { if rel := inDirLex(path, dir); rel != "" { return rel diff --git a/src/cmd/go/internal/test/cover.go b/src/cmd/go/internal/test/cover.go index 9841791552d..657d22a6b4d 100644 --- a/src/cmd/go/internal/test/cover.go +++ b/src/cmd/go/internal/test/cover.go @@ -26,8 +26,8 @@ func initCoverProfile() { if testCoverProfile == "" || testC { return } - if !filepath.IsAbs(testCoverProfile) && testOutputDir != "" { - testCoverProfile = filepath.Join(testOutputDir, testCoverProfile) + if !filepath.IsAbs(testCoverProfile) { + testCoverProfile = filepath.Join(testOutputDir.getAbs(), testCoverProfile) } // No mutex - caller's responsibility to call with no racing goroutines. diff --git a/src/cmd/go/internal/test/flagdefs.go b/src/cmd/go/internal/test/flagdefs.go index 8a0a07683b7..37ac81c2678 100644 --- a/src/cmd/go/internal/test/flagdefs.go +++ b/src/cmd/go/internal/test/flagdefs.go @@ -28,6 +28,7 @@ var passFlagToTest = map[string]bool{ "parallel": true, "run": true, "short": true, + "shuffle": true, "timeout": true, "trace": true, "v": true, diff --git a/src/cmd/go/internal/test/genflags.go b/src/cmd/go/internal/test/genflags.go index 30334b0f305..9277de7fee8 100644 --- a/src/cmd/go/internal/test/genflags.go +++ b/src/cmd/go/internal/test/genflags.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore package main diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 7fc9e8fbdcd..59ea1ef5445 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -29,6 +29,7 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/load" "cmd/go/internal/lockedfile" + "cmd/go/internal/search" "cmd/go/internal/str" "cmd/go/internal/trace" "cmd/go/internal/work" @@ -117,8 +118,8 @@ elapsed time in the summary line. The rule for a match in the cache is that the run involves the same test binary and the flags on the command line come entirely from a -restricted set of 'cacheable' test flags, defined as -cpu, -list, --parallel, -run, -short, and -v. If a run of go test has any test +restricted set of 'cacheable' test flags, defined as -benchtime, -cpu, +-list, -parallel, -run, -short, and -v. If a run of go test has any test or non-test flags outside this set, the result is not cached. To disable test caching, use any test flag or argument other than the cacheable flags. The idiomatic way to disable test caching explicitly @@ -271,6 +272,13 @@ control the execution of any test: the Go tree can run a sanity check but not spend time running exhaustive tests. + -shuffle off,on,N + Randomize the execution order of tests and benchmarks. + It is off by default. If -shuffle is set to on, then it will seed + the randomizer using the system clock. If -shuffle is set to an + integer N, then N will be used as the seed value. In both cases, + the seed will be reported for reproducibility. + -timeout d If a test binary runs longer than duration d, panic. If d is 0, the timeout is disabled. 
@@ -478,7 +486,8 @@ var ( testJSON bool // -json flag testList string // -list flag testO string // -o flag - testOutputDir = base.Cwd // -outputdir flag + testOutputDir outputdirFlag // -outputdir flag + testShuffle shuffleFlag // -shuffle flag testTimeout time.Duration // -timeout flag testV bool // -v flag testVet = vetFlag{flags: defaultVetFlags} // -vet flag @@ -568,8 +577,6 @@ var defaultVetFlags = []string{ } func runTest(ctx context.Context, cmd *base.Command, args []string) { - load.ModResolveTests = true - pkgArgs, testArgs = testFlags(args) if cfg.DebugTrace != "" { @@ -595,7 +602,8 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { work.VetFlags = testVet.flags work.VetExplicit = testVet.explicit - pkgs = load.PackagesAndErrors(ctx, pkgArgs) + pkgOpts := load.PackageOpts{ModResolveTests: true} + pkgs = load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) load.CheckPackageErrors(pkgs) if len(pkgs) == 0 { base.Fatalf("no packages to test") @@ -679,7 +687,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { sort.Strings(all) a := &work.Action{Mode: "go test -i"} - pkgs := load.PackagesAndErrors(ctx, all) + pkgs := load.PackagesAndErrors(ctx, pkgOpts, all) load.CheckPackageErrors(pkgs) for _, p := range pkgs { if cfg.BuildToolchainName == "gccgo" && p.Standard { @@ -702,11 +710,11 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { match := make([]func(*load.Package) bool, len(testCoverPaths)) matched := make([]bool, len(testCoverPaths)) for i := range testCoverPaths { - match[i] = load.MatchPackage(testCoverPaths[i], base.Cwd) + match[i] = load.MatchPackage(testCoverPaths[i], base.Cwd()) } // Select for coverage all dependencies matching the testCoverPaths patterns. - for _, p := range load.TestPackageList(ctx, pkgs) { + for _, p := range load.TestPackageList(ctx, pkgOpts, pkgs) { haveMatch := false for i := range testCoverPaths { if match[i](p) { @@ -715,6 +723,12 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { } } + // A package which only has test files can't be imported + // as a dependency, nor can it be instrumented for coverage. + if len(p.GoFiles)+len(p.CgoFiles) == 0 { + continue + } + // Silently ignore attempts to run coverage on // sync/atomic when using atomic coverage mode. 
// Atomic coverage mode uses sync/atomic, so @@ -768,7 +782,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { ensureImport(p, "sync/atomic") } - buildTest, runTest, printTest, err := builderTest(&b, ctx, p) + buildTest, runTest, printTest, err := builderTest(&b, ctx, pkgOpts, p) if err != nil { str := err.Error() str = strings.TrimPrefix(str, "\n") @@ -835,7 +849,7 @@ var windowsBadWords = []string{ "update", } -func builderTest(b *work.Builder, ctx context.Context, p *load.Package) (buildAction, runAction, printAction *work.Action, err error) { +func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package) (buildAction, runAction, printAction *work.Action, err error) { if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { build := b.CompileAction(work.ModeBuild, work.ModeBuild, p) run := &work.Action{Mode: "test run", Package: p, Deps: []*work.Action{build}} @@ -858,7 +872,7 @@ func builderTest(b *work.Builder, ctx context.Context, p *load.Package) (buildAc DeclVars: declareCoverVars, } } - pmain, ptest, pxtest, err := load.TestPackagesFor(ctx, p, cover) + pmain, ptest, pxtest, err := load.TestPackagesFor(ctx, pkgOpts, p, cover) if err != nil { return nil, nil, nil, err } @@ -931,11 +945,11 @@ func builderTest(b *work.Builder, ctx context.Context, p *load.Package) (buildAc var installAction, cleanAction *work.Action if testC || testNeedBinary() { // -c or profiling flag: create action to copy binary to ./test.out. - target := filepath.Join(base.Cwd, testBinary+cfg.ExeSuffix) + target := filepath.Join(base.Cwd(), testBinary+cfg.ExeSuffix) if testO != "" { target = testO if !filepath.IsAbs(target) { - target = filepath.Join(base.Cwd, target) + target = filepath.Join(base.Cwd(), target) } } if target == os.DevNull { @@ -1326,7 +1340,8 @@ func (c *runCache) tryCacheWithID(b *work.Builder, a *work.Action, id string) bo return false } switch arg[:i] { - case "-test.cpu", + case "-test.benchtime", + "-test.cpu", "-test.list", "-test.parallel", "-test.run", @@ -1499,7 +1514,7 @@ func computeTestInputsID(a *work.Action, testlog []byte) (cache.ActionID, error) if !filepath.IsAbs(name) { name = filepath.Join(pwd, name) } - if a.Package.Root == "" || !inDir(name, a.Package.Root) { + if a.Package.Root == "" || search.InDir(name, a.Package.Root) == "" { // Do not recheck files outside the module, GOPATH, or GOROOT root. break } @@ -1508,7 +1523,7 @@ func computeTestInputsID(a *work.Action, testlog []byte) (cache.ActionID, error) if !filepath.IsAbs(name) { name = filepath.Join(pwd, name) } - if a.Package.Root == "" || !inDir(name, a.Package.Root) { + if a.Package.Root == "" || search.InDir(name, a.Package.Root) == "" { // Do not recheck files outside the module, GOPATH, or GOROOT root. 
break } @@ -1526,18 +1541,6 @@ func computeTestInputsID(a *work.Action, testlog []byte) (cache.ActionID, error) return sum, nil } -func inDir(path, dir string) bool { - if str.HasFilePathPrefix(path, dir) { - return true - } - xpath, err1 := filepath.EvalSymlinks(path) - xdir, err2 := filepath.EvalSymlinks(dir) - if err1 == nil && err2 == nil && str.HasFilePathPrefix(xpath, xdir) { - return true - } - return false -} - func hashGetenv(name string) cache.ActionID { h := cache.NewHash("getenv") v, ok := os.LookupEnv(name) diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index 10e6604da5f..08f1efa2c0d 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -10,6 +10,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "strings" "time" @@ -61,15 +62,16 @@ func init() { cf.String("memprofilerate", "", "") cf.StringVar(&testMutexProfile, "mutexprofile", "", "") cf.String("mutexprofilefraction", "", "") - cf.Var(outputdirFlag{&testOutputDir}, "outputdir", "") + cf.Var(&testOutputDir, "outputdir", "") cf.Int("parallel", 0, "") cf.String("run", "", "") cf.Bool("short", false, "") cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "") cf.StringVar(&testTrace, "trace", "", "") cf.BoolVar(&testV, "v", false, "") + cf.Var(&testShuffle, "shuffle", "") - for name, _ := range passFlagToTest { + for name := range passFlagToTest { cf.Var(cf.Lookup(name).Value, "test."+name, "") } } @@ -126,19 +128,26 @@ func (f stringFlag) Set(value string) error { // outputdirFlag implements the -outputdir flag. // It interprets an empty value as the working directory of the 'go' command. type outputdirFlag struct { - resolved *string + abs string } -func (f outputdirFlag) String() string { return *f.resolved } -func (f outputdirFlag) Set(value string) (err error) { +func (f *outputdirFlag) String() string { + return f.abs +} +func (f *outputdirFlag) Set(value string) (err error) { if value == "" { - // The empty string implies the working directory of the 'go' command. - *f.resolved = base.Cwd + f.abs = "" } else { - *f.resolved, err = filepath.Abs(value) + f.abs, err = filepath.Abs(value) } return err } +func (f *outputdirFlag) getAbs() string { + if f.abs == "" { + return base.Cwd() + } + return f.abs +} // vetFlag implements the special parsing logic for the -vet flag: // a comma-separated list, with a distinguished value "off" and @@ -194,6 +203,41 @@ func (f *vetFlag) Set(value string) error { return nil } +type shuffleFlag struct { + on bool + seed *int64 +} + +func (f *shuffleFlag) String() string { + if !f.on { + return "off" + } + if f.seed == nil { + return "on" + } + return fmt.Sprintf("%d", *f.seed) +} + +func (f *shuffleFlag) Set(value string) error { + if value == "off" { + *f = shuffleFlag{on: false} + return nil + } + + if value == "on" { + *f = shuffleFlag{on: true} + return nil + } + + seed, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf(`-shuffle argument must be "on", "off", or an int64: %v`, err) + } + + *f = shuffleFlag{on: true, seed: &seed} + return nil +} + // testFlags processes the command line, grabbing -x and -c, rewriting known flags // to have "test" before them, and reading the command line for the test binary. // Unfortunately for us, we need to do our own flag processing because go test @@ -367,7 +411,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { // command. Set it explicitly if it is needed due to some other flag that // requests output. 
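shuffleFlag above is an ordinary flag.Value, so it wires into a standard FlagSet in the usual way. A condensed, self-contained copy for demonstration (the cmd/go type is internal, so this duplicates its logic):

package main

import (
	"flag"
	"fmt"
	"strconv"
)

// shuffleFlag, condensed from the implementation above: "off", "on", or a
// decimal seed; a nil seed with on=true means "seed from the clock".
type shuffleFlag struct {
	on   bool
	seed *int64
}

func (f *shuffleFlag) String() string {
	if !f.on {
		return "off"
	}
	if f.seed == nil {
		return "on"
	}
	return strconv.FormatInt(*f.seed, 10)
}

func (f *shuffleFlag) Set(value string) error {
	switch value {
	case "off":
		*f = shuffleFlag{}
		return nil
	case "on":
		*f = shuffleFlag{on: true}
		return nil
	}
	seed, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		return fmt.Errorf(`-shuffle argument must be "on", "off", or an int64: %v`, err)
	}
	*f = shuffleFlag{on: true, seed: &seed}
	return nil
}

func main() {
	var f shuffleFlag
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Var(&f, "shuffle", "")
	_ = fs.Parse([]string{"-shuffle=1234"})
	fmt.Println(f.String()) // 1234
}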
if testProfile() != "" && !outputDirSet { - injectedFlags = append(injectedFlags, "-test.outputdir="+testOutputDir) + injectedFlags = append(injectedFlags, "-test.outputdir="+testOutputDir.getAbs()) } // If the user is explicitly passing -help or -h, show output diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go index 9feffe07656..91485f6f745 100644 --- a/src/cmd/go/internal/vcs/vcs.go +++ b/src/cmd/go/internal/vcs/vcs.go @@ -1176,7 +1176,7 @@ func expand(match map[string]string, s string) string { // and import paths referring to a fully-qualified importPath // containing a VCS type (foo.com/repo.git/dir) var vcsPaths = []*vcsPath{ - // Github + // GitHub { pathPrefix: "github.com", regexp: lazyregexp.New(`^(?Pgithub\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`), diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go index 4257c90c97a..1d419dddb98 100644 --- a/src/cmd/go/internal/vet/vet.go +++ b/src/cmd/go/internal/vet/vet.go @@ -53,8 +53,6 @@ See also: go fmt, go fix. } func runVet(ctx context.Context, cmd *base.Command, args []string) { - load.ModResolveTests = true - vetFlags, pkgArgs := vetFlags(args) if cfg.DebugTrace != "" { @@ -87,7 +85,8 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { } } - pkgs := load.PackagesAndErrors(ctx, pkgArgs) + pkgOpts := load.PackageOpts{ModResolveTests: true} + pkgs := load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) load.CheckPackageErrors(pkgs) if len(pkgs) == 0 { base.Fatalf("no packages to vet") @@ -98,7 +97,7 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { root := &work.Action{Mode: "go vet"} for _, p := range pkgs { - _, ptest, pxtest, err := load.TestPackagesFor(ctx, p, nil) + _, ptest, pxtest, err := load.TestPackagesFor(ctx, pkgOpts, p, nil) if err != nil { base.Errorf("%v", err) continue diff --git a/src/cmd/go/internal/web/bootstrap.go b/src/cmd/go/internal/web/bootstrap.go index 781702100a0..08686cdfcf9 100644 --- a/src/cmd/go/internal/web/bootstrap.go +++ b/src/cmd/go/internal/web/bootstrap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build cmd_go_bootstrap // +build cmd_go_bootstrap // This code is compiled only into the bootstrap 'go' binary. diff --git a/src/cmd/go/internal/web/http.go b/src/cmd/go/internal/web/http.go index 72fa2b2ca6a..f177278eba1 100644 --- a/src/cmd/go/internal/web/http.go +++ b/src/cmd/go/internal/web/http.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cmd_go_bootstrap // +build !cmd_go_bootstrap // This code is compiled into the real 'go' binary, but it is not @@ -27,7 +28,7 @@ import ( "cmd/internal/browser" ) -// impatientInsecureHTTPClient is used in -insecure mode, +// impatientInsecureHTTPClient is used with GOINSECURE, // when we're connecting to https servers that might not be there // or might be using self-signed certificates. var impatientInsecureHTTPClient = &http.Client{ diff --git a/src/cmd/go/internal/web/url_other.go b/src/cmd/go/internal/web/url_other.go index 2641ee62bfa..453af402b43 100644 --- a/src/cmd/go/internal/web/url_other.go +++ b/src/cmd/go/internal/web/url_other.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !windows // +build !windows package web diff --git a/src/cmd/go/internal/web/url_other_test.go b/src/cmd/go/internal/web/url_other_test.go index aa5663355ef..4d6ed2ec7f8 100644 --- a/src/cmd/go/internal/web/url_other_test.go +++ b/src/cmd/go/internal/web/url_other_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !windows // +build !windows package web diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go index 9d141ae233d..69940cb0015 100644 --- a/src/cmd/go/internal/work/action.go +++ b/src/cmd/go/internal/work/action.go @@ -344,7 +344,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) { if strings.HasPrefix(t, "pkgpath ") { t = strings.TrimPrefix(t, "pkgpath ") t = strings.TrimSuffix(t, ";") - pkgs = append(pkgs, load.LoadImportWithFlags(t, base.Cwd, nil, &stk, nil, 0)) + pkgs = append(pkgs, load.LoadImportWithFlags(t, base.Cwd(), nil, &stk, nil, 0)) } } } else { @@ -355,7 +355,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) { scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes)) for scanner.Scan() { t := scanner.Text() - pkgs = append(pkgs, load.LoadImportWithFlags(t, base.Cwd, nil, &stk, nil, 0)) + pkgs = append(pkgs, load.LoadImportWithFlags(t, base.Cwd(), nil, &stk, nil, 0)) } } return @@ -776,7 +776,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac } } var stk load.ImportStack - p := load.LoadImportWithFlags(pkg, base.Cwd, nil, &stk, nil, 0) + p := load.LoadImportWithFlags(pkg, base.Cwd(), nil, &stk, nil, 0) if p.Error != nil { base.Fatalf("load %s: %v", pkg, p.Error) } diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 0e7af6d33f5..1babbda8899 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -10,9 +10,7 @@ import ( "fmt" "go/build" exec "internal/execabs" - "internal/goroot" "os" - "path" "path/filepath" "runtime" "strings" @@ -21,13 +19,9 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/fsys" "cmd/go/internal/load" - "cmd/go/internal/modfetch" "cmd/go/internal/modload" "cmd/go/internal/search" "cmd/go/internal/trace" - - "golang.org/x/mod/modfile" - "golang.org/x/mod/module" ) var CmdBuild = &base.Command{ @@ -158,6 +152,8 @@ and test commands: a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run 'cmd args /path/to/asm '. + The TOOLEXEC_IMPORTPATH environment variable will be set, + matching 'go list -f {{.ImportPath}}' for the package being built. The -asmflags, -gccgoflags, -gcflags, and -ldflags flags accept a space-separated list of arguments to pass to an underlying tool @@ -372,7 +368,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) { var b Builder b.Init() - pkgs := load.PackagesAndErrors(ctx, args) + pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args) load.CheckPackageErrors(pkgs) explicitO := len(cfg.BuildO) > 0 @@ -482,18 +478,22 @@ To eliminate ambiguity about which module versions are used in the build, the arguments must satisfy the following constraints: - Arguments must be package paths or package patterns (with "..." wildcards). - They must not be standard packages (like fmt), meta-patterns (std, cmd, - all), or relative or absolute file paths. +They must not be standard packages (like fmt), meta-patterns (std, cmd, +all), or relative or absolute file paths. 
+ - All arguments must have the same version suffix. Different queries are not - allowed, even if they refer to the same version. +allowed, even if they refer to the same version. + - All arguments must refer to packages in the same module at the same version. + - No module is considered the "main" module. If the module containing - packages named on the command line has a go.mod file, it must not contain - directives (replace and exclude) that would cause it to be interpreted - differently than if it were the main module. The module must not require - a higher version of itself. +packages named on the command line has a go.mod file, it must not contain +directives (replace and exclude) that would cause it to be interpreted +differently than if it were the main module. The module must not require +a higher version of itself. + - Package path arguments must refer to main packages. Pattern arguments - will only match main packages. +will only match main packages. If the arguments don't have version suffixes, "go install" may run in module-aware mode or GOPATH mode, depending on the GO111MODULE environment @@ -588,7 +588,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { } BuildInit() - pkgs := load.PackagesAndErrors(ctx, args) + pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args) if cfg.ModulesEnabled && !modload.HasModRoot() { haveErrors := false allMissingErrors := true @@ -758,145 +758,27 @@ func installOutsideModule(ctx context.Context, args []string) { modload.RootMode = modload.NoRoot modload.AllowMissingModuleImports() modload.Init() - - // Check that the arguments satisfy syntactic constraints. - var version string - for _, arg := range args { - if i := strings.Index(arg, "@"); i >= 0 { - version = arg[i+1:] - if version == "" { - base.Fatalf("go install %s: version must not be empty", arg) - } - break - } - } - patterns := make([]string, len(args)) - for i, arg := range args { - if !strings.HasSuffix(arg, "@"+version) { - base.Errorf("go install %s: all arguments must have the same version (@%s)", arg, version) - continue - } - p := arg[:len(arg)-len(version)-1] - switch { - case build.IsLocalImport(p): - base.Errorf("go install %s: argument must be a package path, not a relative path", arg) - case filepath.IsAbs(p): - base.Errorf("go install %s: argument must be a package path, not an absolute path", arg) - case search.IsMetaPackage(p): - base.Errorf("go install %s: argument must be a package path, not a meta-package", arg) - case path.Clean(p) != p: - base.Errorf("go install %s: argument must be a clean package path", arg) - case !strings.Contains(p, "...") && search.IsStandardImportPath(p) && goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, p): - base.Errorf("go install %s: argument must not be a package in the standard library", arg) - default: - patterns[i] = p - } - } - base.ExitIfErrors() BuildInit() - // Query the module providing the first argument, load its go.mod file, and - // check that it doesn't contain directives that would cause it to be - // interpreted differently if it were the main module. - // - // If multiple modules match the first argument, accept the longest match - // (first result). It's possible this module won't provide packages named by - // later arguments, and other modules would. Let's not try to be too - // magical though. - allowed := modload.CheckAllowed - if modload.IsRevisionQuery(version) { - // Don't check for retractions if a specific revision is requested. 
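The inline validation being deleted in this hunk moves into load.PackagesAndErrorsOutsideModule, but the rule it enforces is simple: every argument must carry one identical, non-empty '@version' suffix. A stand-alone sketch of that check (the helper name is illustrative; the error wording mirrors the deleted messages, not the final cmd/go implementation):

package main

import (
	"fmt"
	"strings"
)

// splitVersionArgs mirrors the shape of the deleted checks: every
// argument must carry the same non-empty "@version" suffix.
func splitVersionArgs(args []string) (patterns []string, version string, err error) {
	for _, arg := range args {
		i := strings.Index(arg, "@")
		if i < 0 || arg[i+1:] == "" {
			return nil, "", fmt.Errorf("%s: version must not be empty", arg)
		}
		switch {
		case version == "":
			version = arg[i+1:]
		case arg[i+1:] != version:
			return nil, "", fmt.Errorf("%s: all arguments must have the same version (@%s)", arg, version)
		}
		patterns = append(patterns, arg[:i])
	}
	return patterns, version, nil
}

func main() {
	patterns, version, err := splitVersionArgs([]string{"example.com/cmd/a@v1.0.0", "example.com/cmd/b@v1.0.0"})
	fmt.Println(patterns, version, err)
}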
- allowed = nil - } - noneSelected := func(path string) (version string) { return "none" } - qrs, err := modload.QueryPackages(ctx, patterns[0], version, noneSelected, allowed) - if err != nil { - base.Fatalf("go install %s: %v", args[0], err) - } - installMod := qrs[0].Mod - data, err := modfetch.GoMod(installMod.Path, installMod.Version) - if err != nil { - base.Fatalf("go install %s: %v", args[0], err) - } - f, err := modfile.Parse("go.mod", data, nil) - if err != nil { - base.Fatalf("go install %s: %s: %v", args[0], installMod, err) - } - directiveFmt := "go install %s: %s\n" + - "\tThe go.mod file for the module providing named packages contains one or\n" + - "\tmore %s directives. It must not contain directives that would cause\n" + - "\tit to be interpreted differently than if it were the main module." - if len(f.Replace) > 0 { - base.Fatalf(directiveFmt, args[0], installMod, "replace") - } - if len(f.Exclude) > 0 { - base.Fatalf(directiveFmt, args[0], installMod, "exclude") - } - - // Since we are in NoRoot mode, the build list initially contains only - // the dummy command-line-arguments module. Add a requirement on the - // module that provides the packages named on the command line. - if err := modload.EditBuildList(ctx, nil, []module.Version{installMod}); err != nil { - base.Fatalf("go install %s: %v", args[0], err) - } - - // Load packages for all arguments. Ignore non-main packages. + // Load packages. Ignore non-main packages. // Print a warning if an argument contains "..." and matches no main packages. // PackagesAndErrors already prints warnings for patterns that don't match any // packages, so be careful not to double print. - matchers := make([]func(string) bool, len(patterns)) - for i, p := range patterns { - if strings.Contains(p, "...") { - matchers[i] = search.MatchPattern(p) - } - } - // TODO(golang.org/issue/40276): don't report errors loading non-main packages // matched by a pattern. - pkgs := load.PackagesAndErrors(ctx, patterns) + pkgOpts := load.PackageOpts{MainOnly: true} + pkgs, err := load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args) + if err != nil { + base.Fatalf("go install: %v", err) + } load.CheckPackageErrors(pkgs) - mainPkgs := make([]*load.Package, 0, len(pkgs)) - mainCount := make([]int, len(patterns)) - nonMainCount := make([]int, len(patterns)) - for _, pkg := range pkgs { - if pkg.Name == "main" { - mainPkgs = append(mainPkgs, pkg) - for i := range patterns { - if matchers[i] != nil && matchers[i](pkg.ImportPath) { - mainCount[i]++ - } - } - } else { - for i := range patterns { - if matchers[i] == nil && patterns[i] == pkg.ImportPath { - base.Errorf("go install: package %s is not a main package", pkg.ImportPath) - } else if matchers[i] != nil && matchers[i](pkg.ImportPath) { - nonMainCount[i]++ - } - } - } + patterns := make([]string, len(args)) + for i, arg := range args { + patterns[i] = arg[:strings.Index(arg, "@")] } - base.ExitIfErrors() - for i, p := range patterns { - if matchers[i] != nil && mainCount[i] == 0 && nonMainCount[i] > 0 { - fmt.Fprintf(os.Stderr, "go: warning: %q matched no main packages\n", p) - } - } - - // Check that named packages are all provided by the same module. - for _, pkg := range mainPkgs { - if pkg.Module == nil { - // Packages in std, cmd, and their vendored dependencies - // don't have this field set. 
- base.Errorf("go install: package %s not provided by module %s", pkg.ImportPath, installMod) - } else if pkg.Module.Path != installMod.Path || pkg.Module.Version != installMod.Version { - base.Errorf("go install: package %s provided by module %s@%s\n\tAll packages must be provided by the same module (%s).", pkg.ImportPath, pkg.Module.Path, pkg.Module.Version, installMod) - } - } - base.ExitIfErrors() // Build and install the packages. - InstallPackages(ctx, patterns, mainPkgs) + InstallPackages(ctx, patterns, pkgs) } // ExecCmd is the command to use to run user binaries. diff --git a/src/cmd/go/internal/work/build_test.go b/src/cmd/go/internal/work/build_test.go index eaf2639e9e0..600fc3083f0 100644 --- a/src/cmd/go/internal/work/build_test.go +++ b/src/cmd/go/internal/work/build_test.go @@ -173,10 +173,11 @@ func TestSharedLibName(t *testing.T) { if err != nil { t.Fatal(err) } + cwd := base.Cwd() oldGopath := cfg.BuildContext.GOPATH defer func() { cfg.BuildContext.GOPATH = oldGopath - os.Chdir(base.Cwd) + os.Chdir(cwd) err := os.RemoveAll(tmpGopath) if err != nil { t.Error(err) diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go index c555d4a9f1e..4e9189a3632 100644 --- a/src/cmd/go/internal/work/buildid.go +++ b/src/cmd/go/internal/work/buildid.go @@ -204,7 +204,7 @@ func (b *Builder) toolID(name string) string { // In order to get reproducible builds for released compilers, we // detect a released compiler by the absence of "experimental" in the // --version output, and in that case we just use the version string. -func (b *Builder) gccgoToolID(name, language string) (string, error) { +func (b *Builder) gccToolID(name, language string) (string, error) { key := name + "." + language b.id.Lock() id := b.toolIDCache[key] diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index 422e83c224f..b506b836561 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -8,11 +8,11 @@ package work import ( "bytes" - "cmd/go/internal/fsys" "context" "encoding/json" "errors" "fmt" + "internal/buildcfg" exec "internal/execabs" "internal/lazyregexp" "io" @@ -31,6 +31,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cache" "cmd/go/internal/cfg" + "cmd/go/internal/fsys" "cmd/go/internal/load" "cmd/go/internal/modload" "cmd/go/internal/str" @@ -245,17 +246,30 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { if p.Internal.ForceLibrary { fmt.Fprintf(h, "forcelibrary\n") } - if len(p.CgoFiles)+len(p.SwigFiles) > 0 { + if len(p.CgoFiles)+len(p.SwigFiles)+len(p.SwigCXXFiles) > 0 { fmt.Fprintf(h, "cgo %q\n", b.toolID("cgo")) cppflags, cflags, cxxflags, fflags, ldflags, _ := b.CFlags(p) - fmt.Fprintf(h, "CC=%q %q %q %q\n", b.ccExe(), cppflags, cflags, ldflags) - if len(p.CXXFiles)+len(p.SwigFiles) > 0 { - fmt.Fprintf(h, "CXX=%q %q\n", b.cxxExe(), cxxflags) + + ccExe := b.ccExe() + fmt.Fprintf(h, "CC=%q %q %q %q\n", ccExe, cppflags, cflags, ldflags) + if ccID, err := b.gccToolID(ccExe[0], "c"); err == nil { + fmt.Fprintf(h, "CC ID=%q\n", ccID) + } + if len(p.CXXFiles)+len(p.SwigCXXFiles) > 0 { + cxxExe := b.cxxExe() + fmt.Fprintf(h, "CXX=%q %q\n", cxxExe, cxxflags) + if cxxID, err := b.gccToolID(cxxExe[0], "c++"); err == nil { + fmt.Fprintf(h, "CXX ID=%q\n", cxxID) + } } if len(p.FFiles) > 0 { - fmt.Fprintf(h, "FC=%q %q\n", b.fcExe(), fflags) + fcExe := b.fcExe() + fmt.Fprintf(h, "FC=%q %q\n", fcExe, fflags) + if fcID, err := b.gccToolID(fcExe[0], "f95"); err == nil { + fmt.Fprintf(h, "FC ID=%q\n", 
fcID) + } } - // TODO(rsc): Should we include the SWIG version or Fortran/GCC/G++/Objective-C compiler versions? + // TODO(rsc): Should we include the SWIG version? } if p.Internal.CoverMode != "" { fmt.Fprintf(h, "cover %q %q\n", p.Internal.CoverMode, b.toolID("cover")) @@ -276,6 +290,10 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { key, val := cfg.GetArchEnv() fmt.Fprintf(h, "%s=%s\n", key, val) + if goexperiment := buildcfg.GOEXPERIMENT(); goexperiment != "" { + fmt.Fprintf(h, "GOEXPERIMENT=%q\n", goexperiment) + } + // TODO(rsc): Convince compiler team not to add more magic environment variables, // or perhaps restrict the environment variables passed to subprocesses. // Because these are clumsy, undocumented special-case hacks @@ -284,7 +302,7 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { magic := []string{ "GOCLOBBERDEADHASH", "GOSSAFUNC", - "GO_SSA_PHI_LOC_CUTOFF", + "GOSSADIR", "GOSSAHASH", } for _, env := range magic { @@ -311,7 +329,7 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { } case "gccgo": - id, err := b.gccgoToolID(BuildToolchain.compiler(), "go") + id, err := b.gccToolID(BuildToolchain.compiler(), "go") if err != nil { base.Fatalf("%v", err) } @@ -319,7 +337,7 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { fmt.Fprintf(h, "pkgpath %s\n", gccgoPkgpath(p)) fmt.Fprintf(h, "ar %q\n", BuildToolchain.(gccgoToolchain).ar()) if len(p.SFiles) > 0 { - id, _ = b.gccgoToolID(BuildToolchain.compiler(), "assembler-with-cpp") + id, _ = b.gccToolID(BuildToolchain.compiler(), "assembler-with-cpp") // Ignore error; different assembler versions // are unlikely to make any difference anyhow. fmt.Fprintf(h, "asm %q\n", id) @@ -649,6 +667,10 @@ OverlayLoop: } outGo, outObj, err := b.cgo(a, base.Tool("cgo"), objdir, pcCFLAGS, pcLDFLAGS, mkAbsFiles(a.Package.Dir, cgofiles), gccfiles, cxxfiles, a.Package.MFiles, a.Package.FFiles) + + // The files in cxxfiles have now been handled by b.cgo. + cxxfiles = nil + if err != nil { return err } @@ -1246,6 +1268,10 @@ func (b *Builder) printLinkerConfig(h io.Writer, p *load.Package) { key, val := cfg.GetArchEnv() fmt.Fprintf(h, "%s=%s\n", key, val) + if goexperiment := buildcfg.GOEXPERIMENT(); goexperiment != "" { + fmt.Fprintf(h, "GOEXPERIMENT=%q\n", goexperiment) + } + // The linker writes source file paths that say GOROOT_FINAL, but // only if -trimpath is not specified (see ld() in gc.go). gorootFinal := cfg.GOROOT_FINAL @@ -1261,7 +1287,7 @@ func (b *Builder) printLinkerConfig(h io.Writer, p *load.Package) { // Or external linker settings and flags? case "gccgo": - id, err := b.gccgoToolID(BuildToolchain.linker(), "go") + id, err := b.gccToolID(BuildToolchain.linker(), "go") if err != nil { base.Fatalf("%v", err) } @@ -1841,6 +1867,7 @@ var objectMagic = [][]byte{ {0xCE, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 32-bit {0xCF, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 64-bit {0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00}, // PE (Windows) as generated by 6l/8l and gcc + {0x4d, 0x5a, 0x78, 0x00, 0x01, 0x00}, // PE (Windows) as generated by llvm for dll {0x00, 0x00, 0x01, 0xEB}, // Plan 9 i386 {0x00, 0x00, 0x8a, 0x97}, // Plan 9 amd64 {0x00, 0x00, 0x06, 0x47}, // Plan 9 arm @@ -2057,8 +2084,11 @@ func (b *Builder) runOut(a *Action, dir string, env []string, cmdargs ...interfa // Add the TOOLEXEC_IMPORTPATH environment variable for -toolexec tools. // It doesn't really matter if -toolexec isn't being used. 
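The 'go build' help text earlier in this commit documents TOOLEXEC_IMPORTPATH; a hypothetical wrapper shows how a -toolexec tool could consume the variable (the program below is an assumed example, not part of this commit):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// Invoked as 'go build -toolexec=/path/to/wrapper ...', the wrapper
// receives the real tool as os.Args[1] and the package it is acting on
// in TOOLEXEC_IMPORTPATH.
func main() {
	if len(os.Args) < 2 {
		os.Exit(2)
	}
	fmt.Fprintf(os.Stderr, "%s: %s\n", os.Getenv("TOOLEXEC_IMPORTPATH"), os.Args[1])
	cmd := exec.Command(os.Args[1], os.Args[2:]...)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}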
+ // Note that a.Package.Desc is not really an import path, + // but this is consistent with 'go list -f {{.ImportPath}}'. + // Plus, it is useful to uniquely identify packages in 'go list -json'. if a != nil && a.Package != nil { - cmd.Env = append(cmd.Env, "TOOLEXEC_IMPORTPATH="+a.Package.ImportPath) + cmd.Env = append(cmd.Env, "TOOLEXEC_IMPORTPATH="+a.Package.Desc()) } cmd.Env = append(cmd.Env, env...) @@ -2347,7 +2377,7 @@ func (b *Builder) gccld(a *Action, p *load.Package, objdir, outfile string, flag cmdargs := []interface{}{cmd, "-o", outfile, objs, flags} dir := p.Dir - out, err := b.runOut(a, base.Cwd, b.cCompilerEnv(), cmdargs...) + out, err := b.runOut(a, base.Cwd(), b.cCompilerEnv(), cmdargs...) if len(out) > 0 { // Filter out useless linker warnings caused by bugs outside Go. @@ -2598,9 +2628,19 @@ func (b *Builder) gccArchArgs() []string { case "s390x": return []string{"-m64", "-march=z196"} case "mips64", "mips64le": - return []string{"-mabi=64"} + args := []string{"-mabi=64"} + if cfg.GOMIPS64 == "hardfloat" { + return append(args, "-mhard-float") + } else if cfg.GOMIPS64 == "softfloat" { + return append(args, "-msoft-float") + } case "mips", "mipsle": - return []string{"-mabi=32", "-march=mips32"} + args := []string{"-mabi=32", "-march=mips32"} + if cfg.GOMIPS == "hardfloat" { + return append(args, "-mhard-float", "-mfp32", "-mno-odd-spreg") + } else if cfg.GOMIPS == "softfloat" { + return append(args, "-msoft-float") + } case "ppc64": if cfg.Goos == "aix" { return []string{"-maix64"} @@ -2951,7 +2991,7 @@ func (b *Builder) dynimport(a *Action, p *load.Package, objdir, importGo, cgoExe if p.Standard && p.ImportPath == "runtime/cgo" { cgoflags = []string{"-dynlinker"} // record path to dynamic linker } - return b.run(a, base.Cwd, p.ImportPath, b.cCompilerEnv(), cfg.BuildToolexec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags) + return b.run(a, base.Cwd(), p.ImportPath, b.cCompilerEnv(), cfg.BuildToolexec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags) } // Run SWIG on all SWIG input files. @@ -3085,7 +3125,7 @@ func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) { } srcs := []string{src} - p := load.GoFilesPackage(context.TODO(), srcs) + p := load.GoFilesPackage(context.TODO(), load.PackageOpts{}, srcs) if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, nil, "", false, srcs); e != nil { return "32", nil diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go index 2087855b3c0..85da4f89f99 100644 --- a/src/cmd/go/internal/work/gc.go +++ b/src/cmd/go/internal/work/gc.go @@ -8,6 +8,7 @@ import ( "bufio" "bytes" "fmt" + "internal/buildcfg" "io" "log" "os" @@ -63,15 +64,33 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg pkgpath := pkgPath(a) gcargs := []string{"-p", pkgpath} - if p.Module != nil && p.Module.GoVersion != "" && allowedVersion(p.Module.GoVersion) { - gcargs = append(gcargs, "-lang=go"+p.Module.GoVersion) + if p.Module != nil { + v := p.Module.GoVersion + if v == "" { + // We started adding a 'go' directive to the go.mod file unconditionally + // as of Go 1.12, so any module that still lacks such a directive must + // either have been authored before then, or have a hand-edited go.mod + // file that hasn't been updated by cmd/go since that edit. + // + // Unfortunately, through at least Go 1.16 we didn't add versions to + // vendor/modules.txt. 
So this could also be a vendored 1.16 dependency. + // + // Fortunately, there were no breaking changes to the language between Go + // 1.11 and 1.16, so if we assume Go 1.16 semantics we will not introduce + // any spurious errors — we will only mask errors, and not particularly + // important ones at that. + v = "1.16" + } + if allowedVersion(v) { + gcargs = append(gcargs, "-lang=go"+v) + } } if p.Standard { gcargs = append(gcargs, "-std") } compilingRuntime := p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) // The runtime package imports a couple of general internal packages. - if p.Standard && (p.ImportPath == "internal/cpu" || p.ImportPath == "internal/bytealg") { + if p.Standard && (p.ImportPath == "internal/cpu" || p.ImportPath == "internal/bytealg" || p.ImportPath == "internal/abi") { compilingRuntime = true } if compilingRuntime { @@ -178,7 +197,7 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg args = append(args, f) } - output, err = b.runOut(a, base.Cwd, nil, args...) + output, err = b.runOut(a, base.Cwd(), nil, args...) return ofile, output, err } @@ -213,7 +232,7 @@ CheckFlags: } // TODO: Test and delete these conditions. - if objabi.Fieldtrack_enabled != 0 || objabi.Preemptibleloops_enabled != 0 { + if buildcfg.Experiment.FieldTrack || buildcfg.Experiment.PreemptibleLoops { canDashC = false } @@ -272,10 +291,11 @@ func (a *Action) trimpath() string { rewriteDir := a.Package.Dir if cfg.BuildTrimpath { + importPath := a.Package.Internal.OrigImportPath if m := a.Package.Module; m != nil && m.Version != "" { - rewriteDir = m.Path + "@" + m.Version + strings.TrimPrefix(a.Package.ImportPath, m.Path) + rewriteDir = m.Path + "@" + m.Version + strings.TrimPrefix(importPath, m.Path) } else { - rewriteDir = a.Package.ImportPath + rewriteDir = importPath } rewrite += a.Package.Dir + "=>" + rewriteDir + ";" } @@ -343,18 +363,6 @@ func asmArgs(a *Action, p *load.Package) []interface{} { } if objabi.IsRuntimePackagePath(pkgpath) { args = append(args, "-compiling-runtime") - if objabi.Regabi_enabled != 0 { - // In order to make it easier to port runtime assembly - // to the register ABI, we introduce a macro - // indicating the experiment is enabled. - // - // Note: a similar change also appears in - // cmd/dist/build.go. - // - // TODO(austin): Remove this once we commit to the - // register ABI (#40724). - args = append(args, "-D=GOEXPERIMENT_REGABI=1") - } } if cfg.Goarch == "mips" || cfg.Goarch == "mipsle" { diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go index b58c8aa8854..1499536932d 100644 --- a/src/cmd/go/internal/work/gccgo.go +++ b/src/cmd/go/internal/work/gccgo.go @@ -102,7 +102,7 @@ func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg, if b.gccSupportsFlag(args[:1], "-ffile-prefix-map=a=b") { if cfg.BuildTrimpath { - args = append(args, "-ffile-prefix-map="+base.Cwd+"=.") + args = append(args, "-ffile-prefix-map="+base.Cwd()+"=.") args = append(args, "-ffile-prefix-map="+b.WorkDir+"=/tmp/go-build") } if fsys.OverlayFile != "" { @@ -114,9 +114,9 @@ func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg, } toPath := absPath // gccgo only applies the last matching rule, so also handle the case where - // BuildTrimpath is true and the path is relative to base.Cwd. - if cfg.BuildTrimpath && str.HasFilePathPrefix(toPath, base.Cwd) { - toPath = "." 
+ toPath[len(base.Cwd):] + // BuildTrimpath is true and the path is relative to base.Cwd(). + if cfg.BuildTrimpath && str.HasFilePathPrefix(toPath, base.Cwd()) { + toPath = "." + toPath[len(base.Cwd()):] } args = append(args, "-ffile-prefix-map="+overlayPath+"="+toPath) } @@ -572,7 +572,7 @@ func (tools gccgoToolchain) cc(b *Builder, a *Action, ofile, cfile string) error } defs = tools.maybePIC(defs) if b.gccSupportsFlag(compiler, "-ffile-prefix-map=a=b") { - defs = append(defs, "-ffile-prefix-map="+base.Cwd+"=.") + defs = append(defs, "-ffile-prefix-map="+base.Cwd()+"=.") defs = append(defs, "-ffile-prefix-map="+b.WorkDir+"=/tmp/go-build") } else if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") { defs = append(defs, "-fdebug-prefix-map="+b.WorkDir+"=/tmp/go-build") diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index ba7c7c2fbb1..37a3e2d0ffd 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -11,21 +11,19 @@ import ( "cmd/go/internal/cfg" "cmd/go/internal/fsys" "cmd/go/internal/modload" - "cmd/internal/objabi" "cmd/internal/sys" "flag" "fmt" "os" "path/filepath" "runtime" - "strings" ) func BuildInit() { modload.Init() instrumentInit() buildModeInit() - if err := fsys.Init(base.Cwd); err != nil { + if err := fsys.Init(base.Cwd()); err != nil { base.Fatalf("go: %v", err) } @@ -47,20 +45,6 @@ func BuildInit() { base.Fatalf("go %s: %s environment variable is relative; must be absolute path: %s\n", flag.Args()[0], key, path) } } - - // For each experiment that has been enabled in the toolchain, define a - // build tag with the same name but prefixed by "goexperiment." which can be - // used for compiling alternative files for the experiment. This allows - // changes for the experiment, like extra struct fields in the runtime, - // without affecting the base non-experiment code at all. [2:] strips the - // leading "X:" from objabi.Expstring(). - exp := objabi.Expstring()[2:] - if exp != "none" { - experiments := strings.Split(exp, ",") - for _, expt := range experiments { - cfg.BuildContext.BuildTags = append(cfg.BuildContext.BuildTags, "goexperiment."+expt) - } - } } func instrumentInit() { diff --git a/src/cmd/go/internal/work/testgo.go b/src/cmd/go/internal/work/testgo.go index 931f49a0691..8b77871b23f 100644 --- a/src/cmd/go/internal/work/testgo.go +++ b/src/cmd/go/internal/work/testgo.go @@ -4,6 +4,7 @@ // This file contains extra hooks for testing the go command. +//go:build testgo // +build testgo package work diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go index 9cc44da84db..02174a56ff0 100644 --- a/src/cmd/go/main.go +++ b/src/cmd/go/main.go @@ -10,6 +10,7 @@ import ( "context" "flag" "fmt" + "internal/buildcfg" "log" "os" "path/filepath" @@ -144,6 +145,11 @@ func main() { os.Exit(2) } + if err := buildcfg.Error; err != nil { + fmt.Fprintf(os.Stderr, "go: %v\n", buildcfg.Error) + os.Exit(2) + } + // Set environment (GOOS, GOARCH, etc) explicitly. 
// In theory all the commands we invoke should have // the same default computation of these as we do, diff --git a/src/cmd/go/proxy_test.go b/src/cmd/go/proxy_test.go index e390c73a9cf..74bfecc08db 100644 --- a/src/cmd/go/proxy_test.go +++ b/src/cmd/go/proxy_test.go @@ -23,7 +23,6 @@ import ( "sync" "testing" - "cmd/go/internal/modfetch" "cmd/go/internal/modfetch/codehost" "cmd/go/internal/par" "cmd/go/internal/txtar" @@ -229,7 +228,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { if m.Path != modPath { continue } - if modfetch.IsPseudoVersion(m.Version) && (latestPseudo == "" || semver.Compare(latestPseudo, m.Version) > 0) { + if module.IsPseudoVersion(m.Version) && (latestPseudo == "" || semver.Compare(latestPseudo, m.Version) > 0) { latestPseudo = m.Version } else if semver.Prerelease(m.Version) != "" && (latestPrerelease == "" || semver.Compare(latestPrerelease, m.Version) > 0) { latestPrerelease = m.Version @@ -282,7 +281,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { continue } found = true - if !modfetch.IsPseudoVersion(m.Version) { + if !module.IsPseudoVersion(m.Version) { if err := module.Check(m.Path, m.Version); err == nil { fmt.Fprintf(w, "%s\n", m.Version) } @@ -315,7 +314,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { for _, m := range modList { if m.Path == path && semver.Compare(best, m.Version) < 0 { var hash string - if modfetch.IsPseudoVersion(m.Version) { + if module.IsPseudoVersion(m.Version) { hash = m.Version[strings.LastIndex(m.Version, "-")+1:] } else { hash = findHash(m) @@ -362,7 +361,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { var buf bytes.Buffer z := zip.NewWriter(&buf) for _, f := range a.Files { - if strings.HasPrefix(f.Name, ".") { + if f.Name == ".info" || f.Name == ".mod" || f.Name == ".zip" { continue } var zipName string diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go index dfaa40548e4..327eaff4450 100644 --- a/src/cmd/go/script_test.go +++ b/src/cmd/go/script_test.go @@ -32,7 +32,6 @@ import ( "cmd/go/internal/robustio" "cmd/go/internal/txtar" "cmd/go/internal/work" - "cmd/internal/objabi" "cmd/internal/sys" ) @@ -41,6 +40,33 @@ func TestScript(t *testing.T) { testenv.MustHaveGoBuild(t) testenv.SkipIfShortAndSlow(t) + var ( + ctx = context.Background() + gracePeriod = 100 * time.Millisecond + ) + if deadline, ok := t.Deadline(); ok { + timeout := time.Until(deadline) + + // If time allows, increase the termination grace period to 5% of the + // remaining time. + if gp := timeout / 20; gp > gracePeriod { + gracePeriod = gp + } + + // When we run commands that execute subprocesses, we want to reserve two + // grace periods to clean up. We will send the first termination signal when + // the context expires, then wait one grace period for the process to + // produce whatever useful output it can (such as a stack trace). After the + // first grace period expires, we'll escalate to os.Kill, leaving the second + // grace period for the test function to record its output before the test + // process itself terminates. 
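To make the arithmetic concrete with assumed numbers: a run started with 'go test -timeout=10m' has roughly 600s until t.Deadline(), so the grace period grows from its 100ms floor to 5% of that (timeout/20 = 30s), and the context timeout becomes 600s - 2*30s = 540s. The first reserved 30s window lets a signaled subprocess write its stack trace; the second lets the test function record that output before the harness itself is killed.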
+ timeout -= 2 * gracePeriod + + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + t.Cleanup(cancel) + } + files, err := filepath.Glob("testdata/script/*.txt") if err != nil { t.Fatal(err) @@ -50,40 +76,51 @@ func TestScript(t *testing.T) { name := strings.TrimSuffix(filepath.Base(file), ".txt") t.Run(name, func(t *testing.T) { t.Parallel() - ts := &testScript{t: t, name: name, file: file} + ctx, cancel := context.WithCancel(ctx) + ts := &testScript{ + t: t, + ctx: ctx, + cancel: cancel, + gracePeriod: gracePeriod, + name: name, + file: file, + } ts.setup() if !*testWork { defer removeAll(ts.workdir) } ts.run() + cancel() }) } } // A testScript holds execution state for a single test script. type testScript struct { - t *testing.T - workdir string // temporary work dir ($WORK) - log bytes.Buffer // test execution log (printed at end of test) - mark int // offset of next log truncation - cd string // current directory during test execution; initially $WORK/gopath/src - name string // short name of test ("foo") - file string // full file name ("testdata/script/foo.txt") - lineno int // line number currently executing - line string // line currently executing - env []string // environment list (for os/exec) - envMap map[string]string // environment mapping (matches env) - stdout string // standard output from last 'go' command; for 'stdout' command - stderr string // standard error from last 'go' command; for 'stderr' command - stopped bool // test wants to stop early - start time.Time // time phase started - background []*backgroundCmd // backgrounded 'exec' and 'go' commands + t *testing.T + ctx context.Context + cancel context.CancelFunc + gracePeriod time.Duration + workdir string // temporary work dir ($WORK) + log bytes.Buffer // test execution log (printed at end of test) + mark int // offset of next log truncation + cd string // current directory during test execution; initially $WORK/gopath/src + name string // short name of test ("foo") + file string // full file name ("testdata/script/foo.txt") + lineno int // line number currently executing + line string // line currently executing + env []string // environment list (for os/exec) + envMap map[string]string // environment mapping (matches env) + stdout string // standard output from last 'go' command; for 'stdout' command + stderr string // standard error from last 'go' command; for 'stderr' command + stopped bool // test wants to stop early + start time.Time // time phase started + background []*backgroundCmd // backgrounded 'exec' and 'go' commands } type backgroundCmd struct { want simpleStatus args []string - cancel context.CancelFunc done <-chan struct{} err error stdout, stderr strings.Builder @@ -109,6 +146,10 @@ var extraEnvKeys = []string{ // setup sets up the test execution temporary directory and environment. 
func (ts *testScript) setup() { + if err := ts.ctx.Err(); err != nil { + ts.t.Fatalf("test interrupted during setup: %v", err) + } + StartProxy() ts.workdir = filepath.Join(testTmpDir, "script-"+ts.name) ts.check(os.MkdirAll(filepath.Join(ts.workdir, "tmp"), 0777)) @@ -123,13 +164,13 @@ func (ts *testScript) setup() { "GOCACHE=" + testGOCACHE, "GODEBUG=" + os.Getenv("GODEBUG"), "GOEXE=" + cfg.ExeSuffix, - "GOEXPSTRING=" + objabi.Expstring()[2:], "GOOS=" + runtime.GOOS, "GOPATH=" + filepath.Join(ts.workdir, "gopath"), "GOPROXY=" + proxyURL, "GOPRIVATE=", "GOROOT=" + testGOROOT, "GOROOT_FINAL=" + os.Getenv("GOROOT_FINAL"), // causes spurious rebuilds and breaks the "stale" built-in if not propagated + "GOTRACEBACK=system", "TESTGO_GOROOT=" + testGOROOT, "GOSUMDB=" + testSumDBVerifierKey, "GONOPROXY=", @@ -200,9 +241,7 @@ func (ts *testScript) run() { // On a normal exit from the test loop, background processes are cleaned up // before we print PASS. If we return early (e.g., due to a test failure), // don't print anything about the processes that were still running. - for _, bg := range ts.background { - bg.cancel() - } + ts.cancel() for _, bg := range ts.background { <-bg.done } @@ -275,6 +314,10 @@ Script: fmt.Fprintf(&ts.log, "> %s\n", line) for _, cond := range parsed.conds { + if err := ts.ctx.Err(); err != nil { + ts.fatalf("test interrupted: %v", err) + } + // Known conds are: $GOOS, $GOARCH, runtime.Compiler, and 'short' (for testing.Short). // // NOTE: If you make changes here, update testdata/script/README too! @@ -356,9 +399,7 @@ Script: } } - for _, bg := range ts.background { - bg.cancel() - } + ts.cancel() ts.cmdWait(success, nil) // Final phase ended. @@ -798,9 +839,7 @@ func (ts *testScript) cmdSkip(want simpleStatus, args []string) { // Before we mark the test as skipped, shut down any background processes and // make sure they have returned the correct status. - for _, bg := range ts.background { - bg.cancel() - } + ts.cancel() ts.cmdWait(success, nil) if len(args) == 1 { @@ -1065,38 +1104,9 @@ func (ts *testScript) exec(command string, args ...string) (stdout, stderr strin func (ts *testScript) startBackground(want simpleStatus, command string, args ...string) (*backgroundCmd, error) { done := make(chan struct{}) bg := &backgroundCmd{ - want: want, - args: append([]string{command}, args...), - done: done, - cancel: func() {}, - } - - ctx := context.Background() - gracePeriod := 100 * time.Millisecond - if deadline, ok := ts.t.Deadline(); ok { - timeout := time.Until(deadline) - // If time allows, increase the termination grace period to 5% of the - // remaining time. - if gp := timeout / 20; gp > gracePeriod { - gracePeriod = gp - } - - // Send the first termination signal with two grace periods remaining. - // If it still hasn't finished after the first period has elapsed, - // we'll escalate to os.Kill with a second period remaining until the - // test deadline.. - timeout -= 2 * gracePeriod - - if timeout <= 0 { - // The test has less than the grace period remaining. There is no point in - // even starting the command, because it will be terminated immediately. - // Save the expense of starting it in the first place. - bg.err = context.DeadlineExceeded - close(done) - return bg, nil - } - - ctx, bg.cancel = context.WithTimeout(ctx, timeout) + want: want, + args: append([]string{command}, args...), + done: done, } cmd := exec.Command(command, args...) @@ -1105,29 +1115,16 @@ func (ts *testScript) startBackground(want simpleStatus, command string, args .. 
cmd.Stdout = &bg.stdout cmd.Stderr = &bg.stderr if err := cmd.Start(); err != nil { - bg.cancel() return nil, err } go func() { - bg.err = waitOrStop(ctx, cmd, stopSignal(), gracePeriod) + bg.err = waitOrStop(ts.ctx, cmd, quitSignal(), ts.gracePeriod) close(done) }() return bg, nil } -// stopSignal returns the appropriate signal to use to request that a process -// stop execution. -func stopSignal() os.Signal { - if runtime.GOOS == "windows" { - // Per https://golang.org/pkg/os/#Signal, “Interrupt is not implemented on - // Windows; using it with os.Process.Signal will return an error.” - // Fall back to Kill instead. - return os.Kill - } - return os.Interrupt -} - // waitOrStop waits for the already-started command cmd by calling its Wait method. // // If cmd does not return before ctx is done, waitOrStop sends it the given interrupt signal. diff --git a/src/cmd/go/stop_other_test.go b/src/cmd/go/stop_other_test.go new file mode 100644 index 00000000000..e1cc6cf8ba7 --- /dev/null +++ b/src/cmd/go/stop_other_test.go @@ -0,0 +1,33 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris) +// +build !aix +// +build !darwin +// +build !dragonfly +// +build !freebsd +// +build !js !wasm +// +build !linux +// +build !netbsd +// +build !openbsd +// +build !solaris + +package main_test + +import ( + "os" + "runtime" +) + +// quitSignal returns the appropriate signal to use to request that a process +// quit execution. +func quitSignal() os.Signal { + if runtime.GOOS == "windows" { + // Per https://golang.org/pkg/os/#Signal, “Interrupt is not implemented on + // Windows; using it with os.Process.Signal will return an error.” + // Fall back to Kill instead. + return os.Kill + } + return os.Interrupt +} diff --git a/src/cmd/go/stop_unix_test.go b/src/cmd/go/stop_unix_test.go new file mode 100644 index 00000000000..ac35b240f0a --- /dev/null +++ b/src/cmd/go/stop_unix_test.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
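The Unix variant that follows returns syscall.SIGQUIT rather than os.Interrupt on purpose: combined with the GOTRACEBACK=system setting added to the script environment above, a Go subprocess that dies on SIGQUIT dumps all of its goroutine stacks, which is exactly the diagnostic output the termination grace period is reserved for. A minimal Unix-only sketch of the resulting signal-then-kill escalation (the command and durations are assumptions; the real logic lives in waitOrStop):

package main

import (
	"os"
	"os/exec"
	"syscall"
	"time"
)

// Ask with SIGQUIT (so a Go child dumps its goroutines), wait one
// grace period, then fall back to Kill.
func main() {
	cmd := exec.Command("sleep", "60")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Start(); err != nil {
		os.Exit(1)
	}
	errc := make(chan error, 1)
	go func() { errc <- cmd.Wait() }()

	cmd.Process.Signal(syscall.SIGQUIT) // polite request for a stack dump
	select {
	case <-errc: // exited within the grace period
	case <-time.After(100 * time.Millisecond):
		cmd.Process.Kill() // still running; force termination
		<-errc
	}
}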
+ +//go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris +// +build aix darwin dragonfly freebsd js,wasm linux netbsd openbsd solaris + +package main_test + +import ( + "os" + "syscall" +) + +func quitSignal() os.Signal { + return syscall.SIGQUIT +} diff --git a/src/cmd/go/testdata/addmod.go b/src/cmd/go/testdata/addmod.go index 09fc8e713bc..03869e68def 100644 --- a/src/cmd/go/testdata/addmod.go +++ b/src/cmd/go/testdata/addmod.go @@ -22,10 +22,10 @@ import ( "bytes" "flag" "fmt" + exec "internal/execabs" "io/fs" "log" "os" - exec "internal/execabs" "path/filepath" "strings" diff --git a/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0.txt index ee439384d25..c1981391a13 100644 --- a/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0.txt +++ b/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0.txt @@ -16,11 +16,15 @@ go 1.16 -- a/a.go -- package main -func main() {} +import "fmt" + +func main() { fmt.Println("a@v1.0.0") } -- b/b.go -- package main -func main() {} +import "fmt" + +func main() { fmt.Println("b@v1.0.0") } -- err/err.go -- package err diff --git a/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.0.0.txt new file mode 100644 index 00000000000..7c29621e83d --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.0.0.txt @@ -0,0 +1,12 @@ +-- .info -- +{"Version":"v1.0.0"} +-- .mod -- +module example.com/deprecated/a + +go 1.17 +-- go.mod -- +module example.com/deprecated/a + +go 1.17 +-- a.go -- +package a diff --git a/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt b/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt new file mode 100644 index 00000000000..0613389d1f3 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt @@ -0,0 +1,14 @@ +-- .info -- +{"Version":"v1.9.0"} +-- .mod -- +// Deprecated: in example.com/deprecated/a@v1.9.0 +module example.com/deprecated/a + +go 1.17 +-- go.mod -- +// Deprecated: in example.com/deprecated/a@v1.9.0 +module example.com/deprecated/a + +go 1.17 +-- a.go -- +package a diff --git a/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.0.0.txt new file mode 100644 index 00000000000..50006aefb5f --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.0.0.txt @@ -0,0 +1,12 @@ +-- .info -- +{"Version":"v1.0.0"} +-- .mod -- +module example.com/deprecated/b + +go 1.17 +-- go.mod -- +module example.com/deprecated/b + +go 1.17 +-- b.go -- +package b diff --git a/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt b/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt new file mode 100644 index 00000000000..163d6b543eb --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt @@ -0,0 +1,14 @@ +-- .info -- +{"Version":"v1.9.0"} +-- .mod -- +// Deprecated: in example.com/deprecated/b@v1.9.0 +module example.com/deprecated/b + +go 1.17 +-- go.mod -- +// Deprecated: in example.com/deprecated/b@v1.9.0 +module example.com/deprecated/b + +go 1.17 +-- b.go -- +package b diff --git a/src/cmd/go/testdata/mod/example.com_dotname_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_dotname_v1.0.0.txt new file mode 100644 index 00000000000..2ada3a3f812 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_dotname_v1.0.0.txt @@ -0,0 +1,12 @@ +-- .info -- +{"Version":"v1.0.0"} +-- .mod -- +module example.com/dotname + +go 1.16 +-- go.mod -- 
+module example.com/dotname + +go 1.16 +-- .dot/dot.go -- +package dot diff --git a/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt b/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt index 8f9e49176c7..edf5d487885 100644 --- a/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt +++ b/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt @@ -1,6 +1,6 @@ Written by hand. Test case for getting a package that has been moved to a nested module, -with a +incompatible verison (and thus no go.mod file) at the root module. +with a +incompatible version (and thus no go.mod file) at the root module. -- .mod -- module example.com/split-incompatible/subpkg diff --git a/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt new file mode 100644 index 00000000000..a68588eedb4 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt @@ -0,0 +1,14 @@ +-- .info -- +{"Version":"v1.0.0"} +-- .mod -- +// Deprecated: in v1.0.0 +module example.com/undeprecated + +go 1.17 +-- go.mod -- +// Deprecated: in v1.0.0 +module example.com/undeprecated + +go 1.17 +-- undeprecated.go -- +package undeprecated diff --git a/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.1.txt b/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.1.txt new file mode 100644 index 00000000000..ecabf322ec4 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.1.txt @@ -0,0 +1,14 @@ +-- .info -- +{"Version":"v1.0.1"} +-- .mod -- +// no longer deprecated +module example.com/undeprecated + +go 1.17 +-- go.mod -- +// no longer deprecated +module example.com/undeprecated + +go 1.17 +-- undeprecated.go -- +package undeprecated diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README index d658cebfce0..d7e67bb7b60 100644 --- a/src/cmd/go/testdata/script/README +++ b/src/cmd/go/testdata/script/README @@ -29,7 +29,6 @@ Scripts also have access to these other environment variables: GOARCH= GOCACHE= GOEXE= - GOEXPSTRING= GOOS= GOPATH=$WORK/gopath GOPROXY= diff --git a/src/cmd/go/testdata/script/bug.txt b/src/cmd/go/testdata/script/bug.txt index b9bbaaad335..571d507358a 100644 --- a/src/cmd/go/testdata/script/bug.txt +++ b/src/cmd/go/testdata/script/bug.txt @@ -1,9 +1,11 @@ # Verify that go bug creates the appropriate URL issue body [!linux] skip +[short] skip go install -env BROWSER=$GOPATH/bin/browser +go build -o $TMPDIR/go ./go +env BROWSER=$GOPATH/bin/browser PATH=$TMPDIR:$PATH go bug exists $TMPDIR/browser grep '^go version' $TMPDIR/browser @@ -44,3 +46,13 @@ func main() { } } +-- go/main.go -- +package main + +import ( + "os" +) + +func main() { + os.Exit(1) +} diff --git a/src/cmd/go/testdata/script/build_overlay.txt b/src/cmd/go/testdata/script/build_overlay.txt index b11cd960141..2932b94e6c4 100644 --- a/src/cmd/go/testdata/script/build_overlay.txt +++ b/src/cmd/go/testdata/script/build_overlay.txt @@ -238,7 +238,7 @@ void say_hello(); void say_hello() { puts("hello cgo\n"); fflush(stdout); } -- m/overlay/asm_gc.s -- -// +build !gccgo +// +build gc TEXT ·foo(SB),0,$0 RET diff --git a/src/cmd/go/testdata/script/build_tag_goexperiment.txt b/src/cmd/go/testdata/script/build_tag_goexperiment.txt index 26ad029845e..bee218f4c1f 100644 --- a/src/cmd/go/testdata/script/build_tag_goexperiment.txt +++ b/src/cmd/go/testdata/script/build_tag_goexperiment.txt @@ -1,85 +1,20 @@ -# compile_ext will fail if the buildtags that are 
enabled (or not enabled) for the -# framepointer and fieldtrack experiments are not consistent with the value of -# GOEXPSTRING (which comes from objabi.Expstring()). - [short] skip +# Reset all experiments so fieldtrack is definitely off. +env GOEXPERIMENT=none go run m - --- expt_main.go -- -package main - -import ( - "os" - "strings" -) - -func main() { - fp() - ft() -} - -func hasExpEntry(s string) bool { - // script_test.go defines GOEXPSTRING to be the value of - // objabi.Expstring(), which gives the enabled experiments baked into the - // toolchain. - g := os.Getenv("GOEXPSTRING") - for _, f := range strings.Split(g, ",") { - if f == s { - return true - } - } - return false -} - --- fp_off.go -- -// +build !goexperiment.framepointer - -package main - -import ( - "fmt" - "os" -) - -func fp() { - if hasExpEntry("framepointer") { - fmt.Println("in !framepointer build, but objabi.Expstring() has 'framepointer'") - os.Exit(1) - } -} - --- fp_on.go -- -// +build goexperiment.framepointer - -package main - -import ( - "fmt" - "os" -) - -func fp() { - if !hasExpEntry("framepointer") { - fmt.Println("in framepointer build, but objabi.Expstring() does not have 'framepointer', is", os.Getenv("GOEXPSTRING")) - os.Exit(1) - } -} +stderr 'fieldtrack off' +# Turn fieldtrack on. +env GOEXPERIMENT=none,fieldtrack +go run m +stderr 'fieldtrack on' -- ft_off.go -- // +build !goexperiment.fieldtrack package main -import ( - "fmt" - "os" -) - -func ft() { - if hasExpEntry("fieldtrack") { - fmt.Println("in !fieldtrack build, but objabi.Expstring() has 'fieldtrack'") - os.Exit(1) - } +func main() { + println("fieldtrack off") } -- ft_on.go -- @@ -87,16 +22,8 @@ func ft() { package main -import ( - "fmt" - "os" -) - -func ft() { - if !hasExpEntry("fieldtrack") { - fmt.Println("in fieldtrack build, but objabi.Expstring() does not have 'fieldtrack', is", os.Getenv("GOEXPSTRING")) - os.Exit(1) - } +func main() { + println("fieldtrack on") } -- go.mod -- diff --git a/src/cmd/go/testdata/script/cover_pkgall_imports.txt b/src/cmd/go/testdata/script/cover_pkgall_imports.txt new file mode 100644 index 00000000000..4e51726b29c --- /dev/null +++ b/src/cmd/go/testdata/script/cover_pkgall_imports.txt @@ -0,0 +1,48 @@ +# This test checks that -coverpkg=all can be used +# when the package pattern includes packages +# which only have tests. +# Verifies golang.org/issue/27333, golang.org/issue/43242. + +[short] skip +cd $GOPATH/src/example.com/cov + +env GO111MODULE=on +go test -coverpkg=all ./... + +env GO111MODULE=off +go test -coverpkg=all ./... 
+ +-- $GOPATH/src/example.com/cov/go.mod -- +module example.com/cov + +-- $GOPATH/src/example.com/cov/notest/notest.go -- +package notest + +func Foo() {} + +-- $GOPATH/src/example.com/cov/onlytest/onlytest_test.go -- +package onlytest_test + +import ( + "testing" + + "example.com/cov/notest" +) + +func TestFoo(t *testing.T) { + notest.Foo() +} + +-- $GOPATH/src/example.com/cov/withtest/withtest.go -- +package withtest + +func Bar() {} + +-- $GOPATH/src/example.com/cov/withtest/withtest_test.go -- +package withtest + +import "testing" + +func TestBar(t *testing.T) { + Bar() +} diff --git a/src/cmd/go/testdata/script/embed.txt b/src/cmd/go/testdata/script/embed.txt index 6ad42e9cd18..04b17cd62b3 100644 --- a/src/cmd/go/testdata/script/embed.txt +++ b/src/cmd/go/testdata/script/embed.txt @@ -107,3 +107,4 @@ import _ "m" -- go.mod -- module m +go 1.16 diff --git a/src/cmd/go/testdata/script/env_write.txt b/src/cmd/go/testdata/script/env_write.txt index bda1e57826e..4fa39df1044 100644 --- a/src/cmd/go/testdata/script/env_write.txt +++ b/src/cmd/go/testdata/script/env_write.txt @@ -173,3 +173,9 @@ go env -w GOOS=linux GOARCH=mips env GOOS=windows ! go env -u GOOS stderr 'unsupported GOOS/GOARCH.*windows/mips$' + +# go env -w should reject relative paths in GOMODCACHE environment. +! go env -w GOMODCACHE=~/test +stderr 'go env -w: GOMODCACHE entry is relative; must be absolute path: "~/test"' +! go env -w GOMODCACHE=./test +stderr 'go env -w: GOMODCACHE entry is relative; must be absolute path: "./test"' diff --git a/src/cmd/go/testdata/script/fmt_load_errors.txt b/src/cmd/go/testdata/script/fmt_load_errors.txt index 297ec0fe3c7..84bf41cfbaf 100644 --- a/src/cmd/go/testdata/script/fmt_load_errors.txt +++ b/src/cmd/go/testdata/script/fmt_load_errors.txt @@ -6,6 +6,17 @@ go fmt -n exclude stdout 'exclude[/\\]x\.go' stdout 'exclude[/\\]x_linux\.go' +# Test edge cases with gofmt. +# Note that this execs GOROOT/bin/gofmt. + +! exec gofmt does-not-exist + +exec gofmt gofmt-dir/no-extension +stdout 'package x' + +exec gofmt gofmt-dir +! 
stdout 'package x' + -- exclude/empty/x.txt -- -- exclude/ignore/_x.go -- package x @@ -17,3 +28,5 @@ package x // +build windows package x +-- gofmt-dir/no-extension -- +package x diff --git a/src/cmd/go/testdata/script/generate.txt b/src/cmd/go/testdata/script/generate.txt index c3c563e5f4a..73f5bbd57a9 100644 --- a/src/cmd/go/testdata/script/generate.txt +++ b/src/cmd/go/testdata/script/generate.txt @@ -26,6 +26,10 @@ stdout 'yes' # flag.go should select yes go generate './generate/env_test.go' stdout 'main_test' +# Test go generate provides the right "$PWD" +go generate './generate/env_pwd.go' +stdout $WORK'[/\\]gopath[/\\]src[/\\]generate' + -- echo.go -- package main @@ -88,4 +92,8 @@ package p -- generate/env_test.go -- package main_test -//go:generate echo $GOPACKAGE \ No newline at end of file +//go:generate echo $GOPACKAGE +-- generate/env_pwd.go -- +package p + +//go:generate echo $PWD diff --git a/src/cmd/go/testdata/script/get_404_meta.txt b/src/cmd/go/testdata/script/get_404_meta.txt index b71cc7fe010..ec4f8d32432 100644 --- a/src/cmd/go/testdata/script/get_404_meta.txt +++ b/src/cmd/go/testdata/script/get_404_meta.txt @@ -3,9 +3,10 @@ [!net] skip [!exec:git] skip +env GONOSUMDB=bazil.org,github.com,golang.org env GO111MODULE=off -go get -d -insecure bazil.org/fuse/fs/fstestutil +go get -d bazil.org/fuse/fs/fstestutil env GO111MODULE=on env GOPROXY=direct -go get -d -insecure bazil.org/fuse/fs/fstestutil +go get -d bazil.org/fuse/fs/fstestutil diff --git a/src/cmd/go/testdata/script/get_insecure.txt b/src/cmd/go/testdata/script/get_insecure.txt index 36ad2c05b7b..69930f7107a 100644 --- a/src/cmd/go/testdata/script/get_insecure.txt +++ b/src/cmd/go/testdata/script/get_insecure.txt @@ -12,10 +12,12 @@ env GO111MODULE=off # GOPATH: Try go get -d of HTTP-only repo (should fail). ! go get -d insecure.go-get-issue-15410.appspot.com/pkg/p -# GOPATH: Try again with -insecure (should succeed). -go get -d -insecure insecure.go-get-issue-15410.appspot.com/pkg/p +# GOPATH: Try again with GOINSECURE (should succeed). +env GOINSECURE=insecure.go-get-issue-15410.appspot.com +go get -d insecure.go-get-issue-15410.appspot.com/pkg/p -# GOPATH: Try updating without -insecure (should fail). +# GOPATH: Try updating without GOINSECURE (should fail). +env GOINSECURE='' ! go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p # Modules: Set up @@ -29,10 +31,14 @@ env GOPROXY='' # Modules: Try go get -d of HTTP-only repo (should fail). ! go get -d insecure.go-get-issue-15410.appspot.com/pkg/p -# Modules: Try again with -insecure (should succeed). -go get -d -insecure insecure.go-get-issue-15410.appspot.com/pkg/p +# Modules: Try again with GOINSECURE (should succeed). +env GOINSECURE=insecure.go-get-issue-15410.appspot.com +env GONOSUMDB=insecure.go-get-issue-15410.appspot.com +go get -d insecure.go-get-issue-15410.appspot.com/pkg/p -# Modules: Try updating without -insecure (should fail). +# Modules: Try updating without GOINSECURE (should fail). +env GOINSECURE='' +env GONOSUMDB='' ! go get -d -u -f insecure.go-get-issue-15410.appspot.com/pkg/p go list -m ... 
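These script updates replace the removed -insecure flag with the GOINSECURE environment variable, a comma-separated list of module-path patterns. A stand-alone sketch of prefix-style glob matching in that spirit (this is an assumption about the general technique, not the cmd/go matcher, which lives in its internal str package):

package main

import (
	"fmt"
	"path"
	"strings"
)

// insecureMatch reports whether modPath matches any comma-separated
// glob in a GOINSECURE-style pattern list, treating each pattern as
// also covering its own path prefix.
func insecureMatch(patterns, modPath string) bool {
	for _, glob := range strings.Split(patterns, ",") {
		glob = strings.TrimSpace(glob)
		if glob == "" {
			continue
		}
		if ok, _ := path.Match(glob, modPath); ok {
			return true
		}
		if modPath == glob || strings.HasPrefix(modPath, glob+"/") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(insecureMatch("insecure.go-get-issue-15410.appspot.com",
		"insecure.go-get-issue-15410.appspot.com/pkg/p")) // true
}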
@@ -48,4 +54,4 @@ func main() { os.Exit(1) } -- module_file -- -module m \ No newline at end of file +module m diff --git a/src/cmd/go/testdata/script/get_insecure_custom_domain.txt b/src/cmd/go/testdata/script/get_insecure_custom_domain.txt index a4a6fd428f7..7eba42e873e 100644 --- a/src/cmd/go/testdata/script/get_insecure_custom_domain.txt +++ b/src/cmd/go/testdata/script/get_insecure_custom_domain.txt @@ -3,4 +3,6 @@ env GO111MODULE=off ! go get -d insecure.go-get-issue-15410.appspot.com/pkg/p -go get -d -insecure insecure.go-get-issue-15410.appspot.com/pkg/p + +env GOINSECURE=insecure.go-get-issue-15410.appspot.com +go get -d insecure.go-get-issue-15410.appspot.com/pkg/p diff --git a/src/cmd/go/testdata/script/get_insecure_deprecated.txt b/src/cmd/go/testdata/script/get_insecure_deprecated.txt deleted file mode 100644 index 7f5f5c7877e..00000000000 --- a/src/cmd/go/testdata/script/get_insecure_deprecated.txt +++ /dev/null @@ -1,21 +0,0 @@ -# GOPATH: Set up -env GO111MODULE=off - -# GOPATH: Fetch without insecure, no warning -! go get test -! stderr 'go get: -insecure flag is deprecated; see ''go help get'' for details' - -# GOPATH: Fetch with insecure, should warn -! go get -insecure test -stderr 'go get: -insecure flag is deprecated; see ''go help get'' for details' - -# Modules: Set up -env GO111MODULE=on - -# Modules: Fetch without insecure, no warning -! go get test -! stderr 'go get: -insecure flag is deprecated; see ''go help get'' for details' - -# Modules: Fetch with insecure, should warn -! go get -insecure test -stderr 'go get: -insecure flag is deprecated; see ''go help get'' for details' diff --git a/src/cmd/go/testdata/script/get_insecure_no_longer_supported.txt b/src/cmd/go/testdata/script/get_insecure_no_longer_supported.txt new file mode 100644 index 00000000000..2517664dd02 --- /dev/null +++ b/src/cmd/go/testdata/script/get_insecure_no_longer_supported.txt @@ -0,0 +1,13 @@ +# GOPATH: Set up +env GO111MODULE=off + +# GOPATH: Fetch with insecure, should error +! go get -insecure test +stderr 'go get: -insecure flag is no longer supported; use GOINSECURE instead' + +# Modules: Set up +env GO111MODULE=on + +# Modules: Fetch with insecure, should error +! go get -insecure test +stderr 'go get: -insecure flag is no longer supported; use GOINSECURE instead' diff --git a/src/cmd/go/testdata/script/get_insecure_redirect.txt b/src/cmd/go/testdata/script/get_insecure_redirect.txt index 0478d1f75de..fb5f26951cd 100644 --- a/src/cmd/go/testdata/script/get_insecure_redirect.txt +++ b/src/cmd/go/testdata/script/get_insecure_redirect.txt @@ -1,4 +1,4 @@ -# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure. +# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure (now replaced by GOINSECURE). # golang.org/issue/34049: 'go get' would panic in case of an insecure redirect in GOPATH mode [!net] skip @@ -9,4 +9,5 @@ env GO111MODULE=off ! 
go get -d vcs-test.golang.org/insecure/go/insecure stderr 'redirected .* to insecure URL' -go get -d -insecure vcs-test.golang.org/insecure/go/insecure +env GOINSECURE=vcs-test.golang.org/insecure/go/insecure +go get -d vcs-test.golang.org/insecure/go/insecure diff --git a/src/cmd/go/testdata/script/get_insecure_update.txt b/src/cmd/go/testdata/script/get_insecure_update.txt index 4511c98c568..e1a1a23d47e 100644 --- a/src/cmd/go/testdata/script/get_insecure_update.txt +++ b/src/cmd/go/testdata/script/get_insecure_update.txt @@ -5,8 +5,10 @@ env GO111MODULE=off # Clone the repo via HTTP manually. exec git clone -q http://github.com/golang/example github.com/golang/example -# Update without -insecure should fail. -# Update with -insecure should succeed. +# Update without GOINSECURE should fail. # We need -f to ignore import comments. ! go get -d -u -f github.com/golang/example/hello -go get -d -u -f -insecure github.com/golang/example/hello + +# Update with GOINSECURE should succeed. +env GOINSECURE=github.com/golang/example/hello +go get -d -u -f github.com/golang/example/hello diff --git a/src/cmd/go/testdata/script/list_err_cycle.txt b/src/cmd/go/testdata/script/list_err_cycle.txt new file mode 100644 index 00000000000..44b82a62b0b --- /dev/null +++ b/src/cmd/go/testdata/script/list_err_cycle.txt @@ -0,0 +1,15 @@ +# Check that we don't get infinite recursion when loading a package with +# an import cycle and another error. Verifies #25830. +! go list +stderr 'found packages a \(a.go\) and b \(b.go\)' + +-- go.mod -- +module errcycle + +go 1.16 +-- a.go -- +package a + +import _ "errcycle" +-- b.go -- +package b \ No newline at end of file diff --git a/src/cmd/go/testdata/script/list_json_with_f.txt b/src/cmd/go/testdata/script/list_json_with_f.txt new file mode 100644 index 00000000000..2011a6e808b --- /dev/null +++ b/src/cmd/go/testdata/script/list_json_with_f.txt @@ -0,0 +1,20 @@ +[short] skip + +# list -json should generate output on stdout +go list -json ./... +stdout . +# list -f should generate output on stdout +go list -f '{{.}}' ./... +stdout . + +# test passing first -json then -f +! go list -json -f '{{.}}' ./... +stderr '^go list -f cannot be used with -json$' + +# test passing first -f then -json +! go list -f '{{.}}' -json ./... +stderr '^go list -f cannot be used with -json$' +-- go.mod -- +module m +-- list_test.go -- +package list_test diff --git a/src/cmd/go/testdata/script/list_load_err.txt b/src/cmd/go/testdata/script/list_load_err.txt index b3b72713e54..0cfa7fbed2f 100644 --- a/src/cmd/go/testdata/script/list_load_err.txt +++ b/src/cmd/go/testdata/script/list_load_err.txt @@ -2,26 +2,42 @@ # other files in the same package cause go/build.Import to return an error. # Verfifies golang.org/issue/38568 - go list -e -deps ./scan stdout m/want - go list -e -deps ./multi stdout m/want - go list -e -deps ./constraint stdout m/want - [cgo] go list -e -test -deps ./cgotest [cgo] stdout m/want - [cgo] go list -e -deps ./cgoflag [cgo] stdout m/want + +# go list -e should include files with errors in GoFiles, TestGoFiles, and +# other lists, assuming they match constraints. 
+# Verifies golang.org/issue/39986 +go list -e -f '{{range .GoFiles}}{{.}},{{end}}' ./scan +stdout '^good.go,scan.go,$' + +go list -e -f '{{range .GoFiles}}{{.}},{{end}}' ./multi +stdout '^a.go,b.go,$' + +go list -e -f '{{range .GoFiles}}{{.}},{{end}}' ./constraint +stdout '^good.go,$' +go list -e -f '{{range .IgnoredGoFiles}}{{.}},{{end}}' ./constraint +stdout '^constraint.go,$' + +[cgo] go list -e -f '{{range .XTestGoFiles}}{{.}},{{end}}' ./cgotest +[cgo] stdout '^cgo_test.go,$' + +[cgo] go list -e -f '{{range .GoFiles}}{{.}},{{end}}' ./cgoflag +[cgo] stdout '^cgoflag.go,$' + -- go.mod -- module m diff --git a/src/cmd/go/testdata/script/list_module_when_error.txt b/src/cmd/go/testdata/script/list_module_when_error.txt new file mode 100644 index 00000000000..844164cd6a2 --- /dev/null +++ b/src/cmd/go/testdata/script/list_module_when_error.txt @@ -0,0 +1,19 @@ +# The Module field should be populated even if there is an error loading the package. + +env GO111MODULE=on + +go list -e -f {{.Module}} +stdout '^mod.com$' + +-- go.mod -- +module mod.com + +go 1.16 + +-- blah.go -- +package blah + +import _ "embed" + +//go:embed README.md +var readme string diff --git a/src/cmd/go/testdata/script/list_std_stale.txt b/src/cmd/go/testdata/script/list_std_stale.txt new file mode 100644 index 00000000000..e5c1f334fd4 --- /dev/null +++ b/src/cmd/go/testdata/script/list_std_stale.txt @@ -0,0 +1,31 @@ +# https://golang.org/issue/44725: packages in std should not be reported as stale, +# regardless of whether they are listed from within or outside GOROOT/src. + +# Control case: net should not be stale at the start of the test, +# and should depend on vendor/golang.org/… instead of golang.org/…. + +! stale net + +go list -deps net +stdout '^vendor/golang.org/x/net' +! stdout '^golang.org/x/net' + +# Net should also not be stale when viewed from within GOROOT/src, +# and should still report the same package dependencies. + +cd $GOROOT/src +! stale net + +go list -deps net +stdout '^vendor/golang.org/x/net' +! stdout '^golang.org/x/net' + + +# However, 'go mod' and 'go get' subcommands should report the original module +# dependencies, not the vendored packages. + +[!net] stop + +env GOPROXY= +go mod why -m golang.org/x/net +stdout '^# golang.org/x/net\nnet\ngolang.org/x/net' diff --git a/src/cmd/go/testdata/script/list_swigcxx.txt b/src/cmd/go/testdata/script/list_swigcxx.txt new file mode 100644 index 00000000000..c6acd9ecdba --- /dev/null +++ b/src/cmd/go/testdata/script/list_swigcxx.txt @@ -0,0 +1,27 @@ +# go list should not report SWIG-generated C++ files in CompiledGoFiles. + +[!exec:swig] skip +[!exec:g++] skip + +# CompiledGoFiles should contain 4 files: +# a.go +# a.swigcxx.go +# _cgo_gotypes.go +# a.cgo1.go + +go list -f '{{.CompiledGoFiles}}' -compiled=true example/swig + +# These names we see here, other than a.go, will be from the build cache, +# so we just count them. +stdout a\.go +stdout -count=3 $GOCACHE + +-- go.mod -- +module example + +go 1.16 + +-- swig/a.go -- +package swig + +-- swig/a.swigcxx -- diff --git a/src/cmd/go/testdata/script/list_symlink_issue35941.txt b/src/cmd/go/testdata/script/list_symlink_issue35941.txt new file mode 100644 index 00000000000..eb12bde6cef --- /dev/null +++ b/src/cmd/go/testdata/script/list_symlink_issue35941.txt @@ -0,0 +1,18 @@ +[!symlink] skip +env GO111MODULE=off + +# Issue 35941: suppress symlink warnings when running 'go list all'. +symlink goproj/css -> $GOPATH/src/css + +go list all +! 
stderr 'warning: ignoring symlink' + +# Show symlink warnings when patterns contain '...'. +go list goproj/... +stderr 'warning: ignoring symlink' + +-- goproj/a.go -- +package a + +-- css/index.css -- +body {} diff --git a/src/cmd/go/testdata/script/mod_all.txt b/src/cmd/go/testdata/script/mod_all.txt index aac66292d66..090eeee22df 100644 --- a/src/cmd/go/testdata/script/mod_all.txt +++ b/src/cmd/go/testdata/script/mod_all.txt @@ -189,19 +189,22 @@ stdout '^example.com/main/testonly_test \[example.com/main/testonly.test\]$' rm vendor -# Convert all modules to go 1.16 to enable lazy loading. -go mod edit -go=1.16 a/go.mod -go mod edit -go=1.16 b/go.mod -go mod edit -go=1.16 c/go.mod -go mod edit -go=1.16 d/go.mod -go mod edit -go=1.16 q/go.mod -go mod edit -go=1.16 r/go.mod -go mod edit -go=1.16 s/go.mod -go mod edit -go=1.16 t/go.mod -go mod edit -go=1.16 u/go.mod -go mod edit -go=1.16 w/go.mod -go mod edit -go=1.16 x/go.mod -go mod edit -go=1.16 +# Convert all modules to go 1.17 to enable lazy loading. +go mod edit -go=1.17 a/go.mod +go mod edit -go=1.17 b/go.mod +go mod edit -go=1.17 c/go.mod +go mod edit -go=1.17 d/go.mod +go mod edit -go=1.17 q/go.mod +go mod edit -go=1.17 r/go.mod +go mod edit -go=1.17 s/go.mod +go mod edit -go=1.17 t/go.mod +go mod edit -go=1.17 u/go.mod +go mod edit -go=1.17 w/go.mod +go mod edit -go=1.17 x/go.mod +go mod edit -go=1.17 +cp go.mod go.mod.orig +go mod tidy +cmp go.mod go.mod.orig # With lazy loading, 'go list all' with neither -mod=vendor nor -test should # match -mod=vendor without -test in 1.15. @@ -282,20 +285,41 @@ stdout '^example.com/t_test \[example.com/t.test\]$' stdout '^example.com/u.test$' stdout '^example.com/u_test \[example.com/u.test\]$' +# 'go list -m all' should cover all of the modules providing packages in +# 'go list -test -deps all', but should exclude modules d and x, +# which are not relevant to the main module and are outside of the +# lazy-loading horizon. -# TODO(#36460): -# 'go list -m all' should exactly cover the packages in 'go list -test all'. +go list -m -f $MODFMT all +stdout -count=10 '^.' +stdout '^example.com/a$' +stdout '^example.com/b$' +stdout '^example.com/c$' +! stdout '^example.com/d$' +stdout '^example.com/main$' +stdout '^example.com/q$' +stdout '^example.com/r$' +stdout '^example.com/s$' +stdout '^example.com/t$' +stdout '^example.com/u$' +stdout '^example.com/w$' +! stdout '^example.com/x$' -- go.mod -- module example.com/main +// Note: this go.mod file initially specifies go 1.15, +// but includes some redundant roots so that it +// also already obeys the 1.17 lazy loading invariants. go 1.15 require ( example.com/a v0.1.0 example.com/b v0.1.0 example.com/q v0.1.0 + example.com/r v0.1.0 // indirect example.com/t v0.1.0 + example.com/u v0.1.0 // indirect ) replace ( diff --git a/src/cmd/go/testdata/script/mod_cache_dir.txt b/src/cmd/go/testdata/script/mod_cache_dir.txt new file mode 100644 index 00000000000..7284ccf8bab --- /dev/null +++ b/src/cmd/go/testdata/script/mod_cache_dir.txt @@ -0,0 +1,11 @@ +env GO111MODULE=on + +# Go should reject relative paths in GOMODCACHE environment. + +env GOMODCACHE="~/test" +! go get example.com/tools/cmd/hello +stderr 'must be absolute path' + +env GOMODCACHE="./test" +! 
go get example.com/tools/cmd/hello +stderr 'must be absolute path' diff --git a/src/cmd/go/testdata/script/mod_convert.txt b/src/cmd/go/testdata/script/mod_convert.txt new file mode 100644 index 00000000000..f60fe87637c --- /dev/null +++ b/src/cmd/go/testdata/script/mod_convert.txt @@ -0,0 +1,66 @@ +[short] skip +[!net] skip +[!exec:git] skip + +env GO111MODULE=on +env GOPROXY= +env GOSUMDB= + +go mod download github.com/docker/distribution@v0.0.0-20150410205453-85de3967aa93 +mkdir x/Godeps +cp $GOPATH/pkg/mod/github.com/docker/distribution@v0.0.0-20150410205453-85de3967aa93/Godeps/Godeps.json x/Godeps +cd x +go mod init github.com/docker/distribution +cmpenv go.mod go.mod.want + +go mod download github.com/fishy/gcsbucket@v0.0.0-20180217031846-618d60fe84e0 +cp $GOPATH/pkg/mod/github.com/fishy/gcsbucket@v0.0.0-20180217031846-618d60fe84e0/Gopkg.lock ../y +cd ../y +go mod init github.com/fishy/gcsbucket +cmpenv go.mod go.mod.want + +-- x/go.mod.want -- +module github.com/docker/distribution + +go $goversion + +require ( + github.com/AdRoll/goamz v0.0.0-20150130162828-d3664b76d905 + github.com/MSOpenTech/azure-sdk-for-go v0.0.0-20150323223030-d90753bcad2e + github.com/Sirupsen/logrus v0.7.3 + github.com/bugsnag/bugsnag-go v1.0.3-0.20141110184014-b1d153021fcd + github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b + github.com/bugsnag/panicwrap v0.0.0-20141110184334-e5f9854865b9 + github.com/codegangsta/cli v1.4.2-0.20150131031259-6086d7927ec3 + github.com/docker/docker v1.4.2-0.20150204013315-165ea5c158cf + github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 + github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 + github.com/gorilla/context v0.0.0-20140604161150-14f550f51af5 + github.com/gorilla/handlers v0.0.0-20140825150757-0e84b7d810c1 + github.com/gorilla/mux v0.0.0-20140926153814-e444e69cbd2e + github.com/jlhawn/go-crypto v0.0.0-20150401213827-cd738dde20f0 + github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 + github.com/yvasiyarov/gorelic v0.0.7-0.20141212073537-a9bba5b9ab50 + github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f + golang.org/x/net v0.0.0-20150202051010-1dfe7915deaf + gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789 + gopkg.in/yaml.v2 v2.0.0-20150116202057-bef53efd0c76 +) +-- y/go.mod.want -- +module github.com/fishy/gcsbucket + +go $goversion + +require ( + cloud.google.com/go v0.18.0 + github.com/fishy/fsdb v0.0.0-20180217030800-5527ded01371 + github.com/golang/protobuf v1.0.0 + github.com/googleapis/gax-go v2.0.0+incompatible + golang.org/x/net v0.0.0-20180216171745-136a25c244d3 + golang.org/x/oauth2 v0.0.0-20180207181906-543e37812f10 + golang.org/x/text v0.3.1-0.20180208041248-4e4a3210bb54 + google.golang.org/api v0.0.0-20180217000815-c7a403bb5fe1 + google.golang.org/appengine v1.0.0 + google.golang.org/genproto v0.0.0-20180206005123-2b5a72b8730b + google.golang.org/grpc v1.10.0 +) diff --git a/src/cmd/go/testdata/script/mod_convert_dep.txt b/src/cmd/go/testdata/script/mod_convert_dep.txt index ad22aca5be8..875a836fd27 100644 --- a/src/cmd/go/testdata/script/mod_convert_dep.txt +++ b/src/cmd/go/testdata/script/mod_convert_dep.txt @@ -18,7 +18,7 @@ stdout '^m$' # Test that we ignore directories when trying to find alternate config files. cd $WORK/gopkgdir/x ! go list . -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' ! 
stderr 'Gopkg.lock' -- $WORK/test/Gopkg.lock -- diff --git a/src/cmd/go/testdata/script/mod_convert_tsv_insecure.txt b/src/cmd/go/testdata/script/mod_convert_tsv_insecure.txt index ddb0c081996..283e2d99366 100644 --- a/src/cmd/go/testdata/script/mod_convert_tsv_insecure.txt +++ b/src/cmd/go/testdata/script/mod_convert_tsv_insecure.txt @@ -1,4 +1,6 @@ env GO111MODULE=on +env GOPROXY=direct +env GOSUMDB=off [!net] skip [!exec:git] skip diff --git a/src/cmd/go/testdata/script/mod_deprecate_message.txt b/src/cmd/go/testdata/script/mod_deprecate_message.txt new file mode 100644 index 00000000000..4a0674b8084 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_deprecate_message.txt @@ -0,0 +1,73 @@ +# When there is a short single-line message, 'go get' should print it all. +go get -d short +stderr '^go: warning: module short is deprecated: short$' +go list -m -u -f '{{.Deprecated}}' short +stdout '^short$' + +# When there is a multi-line message, 'go get' should print the first line. +go get -d multiline +stderr '^go: warning: module multiline is deprecated: first line$' +! stderr 'second line' +go list -m -u -f '{{.Deprecated}}' multiline +stdout '^first line\nsecond line.$' + +# When there is a long message, 'go get' should print a placeholder. +go get -d long +stderr '^go: warning: module long is deprecated: \(message omitted: too long\)$' +go list -m -u -f '{{.Deprecated}}' long +stdout '^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa$' + +# When a message contains unprintable chracters, 'go get' should say that +# without printing the message. +go get -d unprintable +stderr '^go: warning: module unprintable is deprecated: \(message omitted: contains non-printable characters\)$' +go list -m -u -f '{{.Deprecated}}' unprintable +stdout '^message contains ASCII BEL\x07$' + +-- go.mod -- +module use + +go 1.16 + +require ( + short v0.0.0 + multiline v0.0.0 + long v0.0.0 + unprintable v0.0.0 +) + +replace ( + short v0.0.0 => ./short + multiline v0.0.0 => ./multiline + long v0.0.0 => ./long + unprintable v0.0.0 => ./unprintable +) +-- short/go.mod -- +// Deprecated: short +module short + +go 1.16 +-- short/short.go -- +package short +-- multiline/go.mod -- +// Deprecated: first line +// second line. 
+module multiline + +go 1.16 +-- multiline/multiline.go -- +package multiline +-- long/go.mod -- +// Deprecated: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +module long + +go 1.16 +-- long/long.go -- +package long +-- unprintable/go.mod -- +// Deprecated: message contains ASCII BEL +module unprintable + +go 1.16 +-- unprintable/unprintable.go -- +package unprintable diff --git a/src/cmd/go/testdata/script/mod_edit.txt b/src/cmd/go/testdata/script/mod_edit.txt index d7e681e8313..5aa5ca1ffc0 100644 --- a/src/cmd/go/testdata/script/mod_edit.txt +++ b/src/cmd/go/testdata/script/mod_edit.txt @@ -16,9 +16,9 @@ cmpenv go.mod $WORK/go.mod.init cmpenv go.mod $WORK/go.mod.init # go mod edits -go mod edit -droprequire=x.1 -require=x.1@v1.0.0 -require=x.2@v1.1.0 -droprequire=x.2 -exclude='x.1 @ v1.2.0' -exclude=x.1@v1.2.1 -replace=x.1@v1.3.0=y.1@v1.4.0 -replace='x.1@v1.4.0 = ../z' -retract=v1.6.0 -retract=[v1.1.0,v1.2.0] -retract=[v1.3.0,v1.4.0] -retract=v1.0.0 +go mod edit -droprequire=x.1 -require=x.1@v1.0.0 -require=x.2@v1.1.0 -droprequire=x.2 -exclude='x.1 @ v1.2.0' -exclude=x.1@v1.2.1 -exclude=x.1@v2.0.0+incompatible -replace=x.1@v1.3.0=y.1@v1.4.0 -replace='x.1@v1.4.0 = ../z' -retract=v1.6.0 -retract=[v1.1.0,v1.2.0] -retract=[v1.3.0,v1.4.0] -retract=v1.0.0 cmpenv go.mod $WORK/go.mod.edit1 -go mod edit -droprequire=x.1 -dropexclude=x.1@v1.2.1 -dropreplace=x.1@v1.3.0 -require=x.3@v1.99.0 -dropretract=v1.0.0 -dropretract=[v1.1.0,v1.2.0] +go mod edit -droprequire=x.1 -dropexclude=x.1@v1.2.1 -dropexclude=x.1@v2.0.0+incompatible -dropreplace=x.1@v1.3.0 -require=x.3@v1.99.0 -dropretract=v1.0.0 -dropretract=[v1.1.0,v1.2.0] cmpenv go.mod $WORK/go.mod.edit2 # -exclude and -retract reject invalid versions. @@ -27,6 +27,17 @@ stderr '^go mod: -exclude=example.com/m@bad: version "bad" invalid: must be of t ! go mod edit -retract=bad stderr '^go mod: -retract=bad: version "bad" invalid: must be of the form v1.2.3$' +! go mod edit -exclude=example.com/m@v2.0.0 +stderr '^go mod: -exclude=example.com/m@v2\.0\.0: version "v2\.0\.0" invalid: should be v2\.0\.0\+incompatible \(or module example\.com/m/v2\)$' + +! go mod edit -exclude=example.com/m/v2@v1.0.0 +stderr '^go mod: -exclude=example.com/m/v2@v1\.0\.0: version "v1\.0\.0" invalid: should be v2, not v1$' + +! 
go mod edit -exclude=gopkg.in/example.v1@v2.0.0 +stderr '^go mod: -exclude=gopkg\.in/example\.v1@v2\.0\.0: version "v2\.0\.0" invalid: should be v1, not v2$' + +cmpenv go.mod $WORK/go.mod.edit2 + # go mod edit -json go mod edit -json cmpenv stdout $WORK/go.mod.json @@ -35,6 +46,10 @@ cmpenv stdout $WORK/go.mod.json go mod edit -json $WORK/go.mod.retractrationale cmp stdout $WORK/go.mod.retractrationale.json +# go mod edit -json (deprecation) +go mod edit -json $WORK/go.mod.deprecation +cmp stdout $WORK/go.mod.deprecation.json + # go mod edit -json (empty mod file) go mod edit -json $WORK/go.mod.empty cmp stdout $WORK/go.mod.empty.json @@ -88,6 +103,7 @@ require x.1 v1.0.0 exclude ( x.1 v1.2.0 x.1 v1.2.1 + x.1 v2.0.0+incompatible ) replace ( @@ -278,6 +294,20 @@ retract ( } ] } +-- $WORK/go.mod.deprecation -- +// Deprecated: and the new one is not ready yet +module m +-- $WORK/go.mod.deprecation.json -- +{ + "Module": { + "Path": "m", + "Deprecated": "and the new one is not ready yet" + }, + "Require": null, + "Exclude": null, + "Replace": null, + "Retract": null +} -- $WORK/go.mod.empty -- -- $WORK/go.mod.empty.json -- { diff --git a/src/cmd/go/testdata/script/mod_empty_err.txt b/src/cmd/go/testdata/script/mod_empty_err.txt index 982e6b2e518..c4359bccccf 100644 --- a/src/cmd/go/testdata/script/mod_empty_err.txt +++ b/src/cmd/go/testdata/script/mod_empty_err.txt @@ -1,4 +1,4 @@ -# This test checks error messages for non-existant packages in module mode. +# This test checks error messages for non-existent packages in module mode. # Verifies golang.org/issue/35414 env GO111MODULE=on cd $WORK diff --git a/src/cmd/go/testdata/script/mod_find.txt b/src/cmd/go/testdata/script/mod_find.txt index 9468acfd33d..1e01973ff41 100644 --- a/src/cmd/go/testdata/script/mod_find.txt +++ b/src/cmd/go/testdata/script/mod_find.txt @@ -49,7 +49,7 @@ rm go.mod # Test that we ignore directories when trying to find go.mod. cd $WORK/gomoddir ! go list . -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' [!symlink] stop diff --git a/src/cmd/go/testdata/script/mod_get_deprecate_install.txt b/src/cmd/go/testdata/script/mod_get_deprecate_install.txt new file mode 100644 index 00000000000..d832b5f2e80 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_deprecate_install.txt @@ -0,0 +1,22 @@ +[short] skip + +env GO111MODULE=on + +# 'go get' outside a module with an executable prints a deprecation message. +go get example.com/cmd/a +stderr '^go get: installing executables with ''go get'' in module mode is deprecated.$' +stderr 'Use ''go install pkg@version'' instead.' + + +go mod init m + +# 'go get' inside a module with a non-main package does not print a message. +# This will stop building in the future, but it's the command we want to use. +go get rsc.io/quote +! stderr deprecated + +# 'go get' inside a module with an executable prints a different +# deprecation message. +go get example.com/cmd/a +stderr '^go get: installing executables with ''go get'' in module mode is deprecated.$' +stderr 'To adjust and download dependencies of the current module, use ''go get -d''' diff --git a/src/cmd/go/testdata/script/mod_get_deprecated.txt b/src/cmd/go/testdata/script/mod_get_deprecated.txt new file mode 100644 index 00000000000..4633009f69b --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_deprecated.txt @@ -0,0 +1,66 @@ +# 'go get pkg' should not show a deprecation message for an unrelated module. +go get -d ./use/nothing +!
stderr 'module.*is deprecated' + +# 'go get pkg' should show a deprecation message for the module providing pkg. +go get -d example.com/deprecated/a +stderr '^go: warning: module example.com/deprecated/a is deprecated: in example.com/deprecated/a@v1.9.0$' +go get -d example.com/deprecated/a@v1.0.0 +stderr '^go: warning: module example.com/deprecated/a is deprecated: in example.com/deprecated/a@v1.9.0$' + +# 'go get pkg' should show a deprecation message for a module providing +# packages directly imported by pkg. +go get -d ./use/a +stderr '^go: warning: module example.com/deprecated/a is deprecated: in example.com/deprecated/a@v1.9.0$' + +# 'go get pkg' may show a deprecation message for an indirectly required module +# if it provides a package named on the command line. +go get -d ./use/b +! stderr 'module.*is deprecated' +go get -d local/use +! stderr 'module.*is deprecated' +go get -d example.com/deprecated/b +stderr '^go: warning: module example.com/deprecated/b is deprecated: in example.com/deprecated/b@v1.9.0$' + +# 'go get pkg' does not show a deprecation message for a module providing a +# directly imported package if the module is no longer deprecated in its +# latest version, even if the module is deprecated in its current version. +go get -d ./use/undeprecated +! stderr 'module.*is deprecated' + +-- go.mod -- +module m + +go 1.17 + +require ( + example.com/deprecated/a v1.0.0 + example.com/undeprecated v1.0.0 + local v0.0.0 +) + +replace local v0.0.0 => ./local +-- use/nothing/nothing.go -- +package nothing +-- use/a/a.go -- +package a + +import _ "example.com/deprecated/a" +-- use/b/b.go -- +package b + +import _ "local/use" +-- use/undeprecated/undeprecated.go -- +package undeprecated + +import _ "example.com/undeprecated" +-- local/go.mod -- +module local + +go 1.17 + +require example.com/deprecated/b v1.0.0 +-- local/use/use.go -- +package use + +import _ "example.com/deprecated/b" diff --git a/src/cmd/go/testdata/script/mod_get_downgrade.txt b/src/cmd/go/testdata/script/mod_get_downgrade.txt index a954c10344b..c26c5e1c210 100644 --- a/src/cmd/go/testdata/script/mod_get_downgrade.txt +++ b/src/cmd/go/testdata/script/mod_get_downgrade.txt @@ -20,8 +20,8 @@ stdout 'rsc.io/quote v1.5.1' stdout 'rsc.io/sampler v1.3.0' ! go get -d rsc.io/sampler@v1.0.0 rsc.io/quote@v1.5.2 golang.org/x/text@none +stderr -count=1 '^go get:' stderr '^go get: rsc.io/quote@v1.5.2 requires rsc.io/sampler@v1.3.0, not rsc.io/sampler@v1.0.0$' -stderr '^go get: rsc.io/quote@v1.5.2 requires golang.org/x/text@v0.0.0-20170915032832-14c0d48ead0c, not golang.org/x/text@none$' go list -m all stdout 'rsc.io/quote v1.5.1' diff --git a/src/cmd/go/testdata/script/mod_get_downup_artifact.txt b/src/cmd/go/testdata/script/mod_get_downup_artifact.txt index b35d4c4fd03..c20583b22a1 100644 --- a/src/cmd/go/testdata/script/mod_get_downup_artifact.txt +++ b/src/cmd/go/testdata/script/mod_get_downup_artifact.txt @@ -61,14 +61,8 @@ go list -m all stdout '^example.com/a v0.1.0 ' stdout '^example.com/b v0.1.0 ' stdout '^example.com/c v0.1.0 ' - - # BUG: d should remain at v0.1.0, because it is not transitively imported by a - # with b@v0.1.0. Today, it is spuriously upgraded to v0.2.0. -stdout '^example.com/d v0.2.0 ' - - # BUG: e should not be added, because it is not transitively imported by a - # with b@v0.1.0. Today, it is spuriously added. -stdout '^example.com/e v0.1.0 ' +stdout '^example.com/d v0.1.0 ' +! 
stdout '^example.com/e ' -- go.mod -- module example.com/m diff --git a/src/cmd/go/testdata/script/mod_get_downup_pseudo_artifact.txt b/src/cmd/go/testdata/script/mod_get_downup_pseudo_artifact.txt new file mode 100644 index 00000000000..c49615cecb3 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_downup_pseudo_artifact.txt @@ -0,0 +1,129 @@ +# This test illustrates a case where an upgrade–downgrade–upgrade cycle could +# add extraneous dependencies due to another module depending on an +# otherwise-unlisted version (such as a pseudo-version). +# +# This case corresponds to the "downhiddenartifact" test in the mvs package. + +# The initial package import graph used in the test looks like: +# +# a --- b +# \ \ +# \ \ +# c --- d +# +# The module dependency graph initially looks like: +# +# a --- b.3 +# \ \ +# \ \ +# c.2 --- d.2 +# +# c.1 --- b.2 (pseudo) +# +# b.1 --- e.1 + +cp go.mod go.mod.orig +go mod tidy +cmp go.mod.orig go.mod + +# When we downgrade d.2 to d.1, no dependency on e should be added +# because nothing else in the module or import graph requires it. +go get -d example.net/d@v0.1.0 + +go list -m all +stdout '^example.net/b v0.2.1-0.20210219000000-000000000000 ' +stdout '^example.net/c v0.1.0 ' +stdout '^example.net/d v0.1.0 ' +! stdout '^example.net/e ' + +-- go.mod -- +module example.net/a + +go 1.16 + +require ( + example.net/b v0.3.0 + example.net/c v0.2.0 +) + +replace ( + example.net/b v0.1.0 => ./b1 + example.net/b v0.2.1-0.20210219000000-000000000000 => ./b2 + example.net/b v0.3.0 => ./b3 + example.net/c v0.1.0 => ./c1 + example.net/c v0.2.0 => ./c2 + example.net/d v0.1.0 => ./d + example.net/d v0.2.0 => ./d + example.net/e v0.1.0 => ./e +) +-- a.go -- +package a + +import ( + _ "example.net/b" + _ "example.net/c" +) + +-- b1/go.mod -- +module example.net/b + +go 1.16 + +require example.net/e v0.1.0 +-- b1/b.go -- +package b + +import _ "example.net/e" + +-- b2/go.mod -- +module example.net/b + +go 1.16 +-- b2/b.go -- +package b + +-- b3/go.mod -- +module example.net/b + +go 1.16 + +require example.net/d v0.2.0 +-- b3/b.go -- +package b + +import _ "example.net/d" +-- c1/go.mod -- +module example.net/c + +go 1.16 + +require example.net/b v0.2.1-0.20210219000000-000000000000 +-- c1/c.go -- +package c + +import _ "example.net/b" + +-- c2/go.mod -- +module example.net/c + +go 1.16 + +require example.net/d v0.2.0 +-- c2/c.go -- +package c + +import _ "example.net/d" + +-- d/go.mod -- +module example.net/d + +go 1.16 +-- d/d.go -- +package d + +-- e/go.mod -- +module example.net/e + +go 1.16 +-- e/e.go -- +package e diff --git a/src/cmd/go/testdata/script/mod_get_insecure_redirect.txt b/src/cmd/go/testdata/script/mod_get_insecure_redirect.txt index 3755f176332..2e128344955 100644 --- a/src/cmd/go/testdata/script/mod_get_insecure_redirect.txt +++ b/src/cmd/go/testdata/script/mod_get_insecure_redirect.txt @@ -1,4 +1,4 @@ -# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure. +# golang.org/issue/29591: 'go get' was following plain-HTTP redirects even without -insecure (now replaced by GOINSECURE). [!net] skip [!exec:git] skip @@ -10,8 +10,6 @@ env GOSUMDB=off ! 
go get -d vcs-test.golang.org/insecure/go/insecure stderr 'redirected .* to insecure URL' -go get -d -insecure vcs-test.golang.org/insecure/go/insecure - # insecure host env GOINSECURE=vcs-test.golang.org go clean -modcache diff --git a/src/cmd/go/testdata/script/mod_get_missing_ziphash.txt b/src/cmd/go/testdata/script/mod_get_missing_ziphash.txt new file mode 100644 index 00000000000..789d42d24db --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_missing_ziphash.txt @@ -0,0 +1,55 @@ +# Test that if the module cache contains an extracted source directory but not +# a ziphash, 'go build' complains about a missing sum, and 'go get' adds +# the sum. Verifies #44749. + +# With a tidy go.sum, go build succeeds. This also populates the module cache. +cp go.sum.tidy go.sum +go build -n use +env GOPROXY=off +env GOSUMDB=off + +# Control case: if we delete the hash for rsc.io/quote v1.5.2, +# 'go build' reports an error. 'go get' adds the sum. +cp go.sum.bug go.sum +! go build -n use +stderr '^use.go:3:8: missing go.sum entry for module providing package rsc.io/quote \(imported by use\); to add:\n\tgo get use$' +go get -d use +cmp go.sum go.sum.tidy +go build -n use + +# If we delete the hash *and* the ziphash file, we should see the same behavior. +cp go.sum.bug go.sum +rm $WORK/gopath/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.ziphash +! go build -n use +stderr '^use.go:3:8: missing go.sum entry for module providing package rsc.io/quote \(imported by use\); to add:\n\tgo get use$' +go get -d use +cmp go.sum go.sum.tidy +go build -n use + +-- go.mod -- +module use + +go 1.16 + +require rsc.io/quote v1.5.2 +-- go.sum.tidy -- +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:pvCbr/wm8HzDD3fVywevekufpn6tCGPY3spdHeZJEsw= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +rsc.io/quote v1.5.2 h1:3fEykkD9k7lYzXqCYrwGAf7iNhbk4yCjHmKBN9td4L0= +rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0= +rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/testonly v1.0.0 h1:K/VWHdO+Jv7woUXG0GzVNx1czBXUt3Ib1deaMn+xk64= +rsc.io/testonly v1.0.0/go.mod h1:OqmGbIFOcF+XrFReLOGZ6BhMM7uMBiQwZsyNmh74SzY= +-- go.sum.bug -- +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:pvCbr/wm8HzDD3fVywevekufpn6tCGPY3spdHeZJEsw= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0= +rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/testonly v1.0.0 h1:K/VWHdO+Jv7woUXG0GzVNx1czBXUt3Ib1deaMn+xk64= +rsc.io/testonly v1.0.0/go.mod h1:OqmGbIFOcF+XrFReLOGZ6BhMM7uMBiQwZsyNmh74SzY= +-- use.go -- +package use + +import _ "rsc.io/quote" diff --git a/src/cmd/go/testdata/script/mod_get_pkgtags.txt b/src/cmd/go/testdata/script/mod_get_pkgtags.txt index c0a57f3fab2..0c79ec71b7b 100644 --- a/src/cmd/go/testdata/script/mod_get_pkgtags.txt +++ b/src/cmd/go/testdata/script/mod_get_pkgtags.txt @@ -16,6 +16,7 @@ go mod edit -droprequire example.net/tools # error out if dependencies of tag-guarded files are missing. go get -d example.net/tools@v0.1.0 +! stderr 'no Go source files' ! 
go list example.net/tools stderr '^package example.net/tools: build constraints exclude all Go files in .*[/\\]tools$' @@ -30,6 +31,19 @@ go list -deps example.net/cmd/tool stderr '^no required module provides package example.net/missing; to add it:\n\tgo get example.net/missing$' +# https://golang.org/issue/33526: 'go get' without '-d' should succeed +# for a module whose root is a constrained-out package. +# +# Ideally it should silently succeed, but today it logs the "no Go source files" +# error and succeeds anyway. + +go get example.net/tools@v0.1.0 +! stderr . + +! go build example.net/tools +stderr '^package example.net/tools: build constraints exclude all Go files in .*[/\\]tools$' + + # https://golang.org/issue/29268 # 'go get' should fetch modules whose roots contain test-only packages, but # without the -t flag shouldn't error out if the test has missing dependencies. diff --git a/src/cmd/go/testdata/script/mod_get_private_vcs.txt b/src/cmd/go/testdata/script/mod_get_private_vcs.txt index 514b0a7a531..75c776a7fa2 100644 --- a/src/cmd/go/testdata/script/mod_get_private_vcs.txt +++ b/src/cmd/go/testdata/script/mod_get_private_vcs.txt @@ -9,3 +9,35 @@ env GOPROXY=direct stderr 'Confirm the import path was entered correctly.' stderr 'If this is a private repository, see https://golang.org/doc/faq#git_https for additional information.' ! stdout . + +# Fetching a nonexistent commit should return an "unknown revision" +# error message. +! go get github.com/golang/term@86186f3aba07ed0212cfb944f3398997d2d07c6b +stderr '^go get: github.com/golang/term@86186f3aba07ed0212cfb944f3398997d2d07c6b: invalid version: unknown revision 86186f3aba07ed0212cfb944f3398997d2d07c6b$' +! stdout . + +! go get github.com/golang/nonexist@master +stderr '^Confirm the import path was entered correctly.$' +stderr '^If this is a private repository, see https://golang.org/doc/faq#git_https for additional information.$' +! stderr 'unknown revision' +! stdout . + +[!linux] stop # Needs XDG_CONFIG_HOME. +[!exec:false] stop + +# Test that Git clone errors will be shown to the user instead of a generic +# "unknown revision" error. To do this we want to force git ls-remote to return +# an error we don't already have special handling for. See golang/go#42751. +# +# Set XDG_CONFIG_HOME to tell Git where to look for the git config file listed +# below, which turns on ssh. +env XDG_CONFIG_HOME=$TMPDIR +env GIT_SSH_COMMAND=false +! go install github.com/golang/nonexist@master +stderr 'fatal: Could not read from remote repository.' +! stderr 'unknown revision' +! stdout . + +-- $TMPDIR/git/config -- +[url "git@github.com:"] + insteadOf = https://github.com/ diff --git a/src/cmd/go/testdata/script/mod_get_promote_implicit.txt b/src/cmd/go/testdata/script/mod_get_promote_implicit.txt index 10ca6594e4c..9eec2013210 100644 --- a/src/cmd/go/testdata/script/mod_get_promote_implicit.txt +++ b/src/cmd/go/testdata/script/mod_get_promote_implicit.txt @@ -6,7 +6,7 @@ cp go.mod.orig go.mod go list -m indirect-with-pkg stdout '^indirect-with-pkg v1.0.0 => ./indirect-with-pkg$' ! go list ./use-indirect -stderr '^go: m/use-indirect: package indirect-with-pkg imported from implicitly required module; to add missing requirements, run:\n\tgo get indirect-with-pkg@v1.0.0$' +stderr '^package m/use-indirect imports indirect-with-pkg from implicitly required module; to add missing requirements, run:\n\tgo get indirect-with-pkg@v1.0.0$' # We can promote the implicit requirement by getting the importing package. 
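(Editorial aside, not part of the patch: mechanically, "promoting" an implicit requirement — whether by getting the importing package or the imported one, as the surrounding comments discuss — amounts to writing an explicit require line into the main module's go.mod. Below is a sketch of that edit using golang.org/x/mod/modfile, reusing the test's illustrative module path and version; it approximates what 'go get' records, not the go command's actual code path.)

    package main

    import (
        "log"
        "os"

        "golang.org/x/mod/modfile"
    )

    func main() {
        data, err := os.ReadFile("go.mod")
        if err != nil {
            log.Fatal(err)
        }
        f, err := modfile.Parse("go.mod", data, nil)
        if err != nil {
            log.Fatal(err)
        }
        // Record the implicit requirement explicitly, as the suggested
        // 'go get indirect-with-pkg@v1.0.0' would.
        if err := f.AddRequire("indirect-with-pkg", "v1.0.0"); err != nil {
            log.Fatal(err)
        }
        f.Cleanup() // tidy the syntax tree after edits
        out, err := f.Format()
        if err != nil {
            log.Fatal(err)
        }
        if err := os.WriteFile("go.mod", out, 0666); err != nil {
            log.Fatal(err)
        }
    }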
# NOTE: the hint recommends getting the imported package (tested below) since diff --git a/src/cmd/go/testdata/script/mod_get_retract.txt b/src/cmd/go/testdata/script/mod_get_retract.txt index fe0ac886295..560fa7bfb25 100644 --- a/src/cmd/go/testdata/script/mod_get_retract.txt +++ b/src/cmd/go/testdata/script/mod_get_retract.txt @@ -11,7 +11,7 @@ cp go.mod.orig go.mod go mod edit -require example.com/retract/self/prev@v1.9.0 go get -d example.com/retract/self/prev stderr '^go: warning: example.com/retract/self/prev@v1.9.0: retracted by module author: self$' -stderr '^go: to switch to the latest unretracted version, run:\n\tgo get example.com/retract/self/prev@latest$' +stderr '^go: to switch to the latest unretracted version, run:\n\tgo get example.com/retract/self/prev@latest\n$' go list -m example.com/retract/self/prev stdout '^example.com/retract/self/prev v1.9.0$' diff --git a/src/cmd/go/testdata/script/mod_go_version_missing.txt b/src/cmd/go/testdata/script/mod_go_version_missing.txt new file mode 100644 index 00000000000..aca36a04506 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_go_version_missing.txt @@ -0,0 +1,123 @@ +cp go.mod go.mod.orig + +# For modules whose go.mod file does not include a 'go' directive, +# we assume the language and dependency semantics of Go 1.16, +# but do not trigger “automatic vendoring” mode (-mod=vendor), +# which was added in Go 1.14 and was not triggered +# under the same conditions in Go 1.16 (which would instead +# default to -mod=readonly when no 'go' directive is present). + +# For Go 1.16 modules, 'all' should prune out dependencies of tests, +# even if the 'go' directive is missing. + +go list -mod=readonly all +stdout '^example.com/dep$' +! stdout '^example.com/testdep$' +cp stdout list-1.txt +cmp go.mod go.mod.orig + +# We should only default to -mod=vendor if the 'go' directive is explicit in the +# go.mod file. Otherwise, we don't actually know whether the module was written +# against Go 1.11 or 1.16. We would have to update the go.mod file to clarify, +# and as of Go 1.16 we don't update the go.mod file by default. +# +# If we set -mod=vendor explicitly, we shouldn't apply the Go 1.14 +# consistency check, because — again — we don't know whether we're in a 1.11 +# module or a bad-script-edited 1.16 module. + +! go list -mod=vendor all +! stderr '^go: inconsistent vendoring' +stderr 'cannot find package "\." in:\n\t.*[/\\]vendor[/\\]example.com[/\\]badedit$' + +# When we set -mod=mod, the go version should be updated immediately, +# to the current version, converting the requirements from eager to lazy. +# +# Since we don't know which requirements are actually relevant to the main +# module, all requirements are added as roots, making the requirements untidy. + +go list -mod=mod all +! stdout '^example.com/testdep$' +cmp stdout list-1.txt +cmpenv go.mod go.mod.untidy + +go mod tidy +cmpenv go.mod go.mod.tidy + +# On the other hand, if we jump straight to 'go mod tidy', +# the requirements remain tidy from the start. + +cp go.mod.orig go.mod +go mod tidy +cmpenv go.mod go.mod.tidy + + +# The updated version should have been written back to go.mod, so now the 'go' +# directive is explicit. -mod=vendor should trigger by default, and the stronger +# Go 1.14 consistency check should apply. +! go list all +stderr '^go: inconsistent vendoring' +! 
stderr badedit + + +-- go.mod -- +module example.com/m + +require example.com/dep v0.1.0 + +replace ( + example.com/dep v0.1.0 => ./dep + example.com/testdep v0.1.0 => ./testdep +) +-- go.mod.untidy -- +module example.com/m + +go $goversion + +require ( + example.com/dep v0.1.0 + example.com/testdep v0.1.0 // indirect +) + +replace ( + example.com/dep v0.1.0 => ./dep + example.com/testdep v0.1.0 => ./testdep +) +-- go.mod.tidy -- +module example.com/m + +go $goversion + +require example.com/dep v0.1.0 + +replace ( + example.com/dep v0.1.0 => ./dep + example.com/testdep v0.1.0 => ./testdep +) +-- vendor/example.com/dep/dep.go -- +package dep +import _ "example.com/badedit" +-- vendor/modules.txt -- +HAHAHA this is broken. + +-- m.go -- +package m + +import _ "example.com/dep" + +const x = 1_000 + +-- dep/go.mod -- +module example.com/dep + +require example.com/testdep v0.1.0 +-- dep/dep.go -- +package dep +-- dep/dep_test.go -- +package dep_test + +import _ "example.com/testdep" + +-- testdep/go.mod -- +module example.com/testdep +-- testdep/testdep.go -- +package testdep diff --git a/src/cmd/go/testdata/script/mod_gomodcache.txt b/src/cmd/go/testdata/script/mod_gomodcache.txt index b2143e20939..74a3c79622f 100644 --- a/src/cmd/go/testdata/script/mod_gomodcache.txt +++ b/src/cmd/go/testdata/script/mod_gomodcache.txt @@ -47,6 +47,11 @@ env GOMODCACHE=$WORK/modcache go mod download rsc.io/quote@v1.0.0 exists $WORK/modcache/cache/download/rsc.io/quote/@v/v1.0.0.info +# Test error when cannot create GOMODCACHE directory +env GOMODCACHE=$WORK/modcachefile +! go install example.com/cmd/a@v1.0.0 +stderr 'go: could not create module cache' + # Test that the following work even with GO111MODULE=off env GO111MODULE=off @@ -58,3 +63,5 @@ go clean -modcache -- go.mod -- module m + +-- $WORK/modcachefile -- diff --git a/src/cmd/go/testdata/script/mod_indirect_nospace.txt b/src/cmd/go/testdata/script/mod_indirect_nospace.txt new file mode 100644 index 00000000000..f4fb6a8c1b5 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_indirect_nospace.txt @@ -0,0 +1,32 @@ +# https://golang.org/issue/45932: "indirect" comments missing spaces +# should not be corrupted when the comment is removed. 
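(Editorial aside, not part of the patch: the "// indirect" marker that the test below manipulates is not separate go.mod syntax; golang.org/x/mod/modfile parses the comment into Require.Indirect, and whether the space-less spelling "//indirect" is recognized and cleanly rewritten on removal is exactly what issue 45932 covers. A minimal sketch of inspecting the flag:)

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/mod/modfile"
    )

    func main() {
        mod := []byte("module example.net/m\n\ngo 1.16\n\nrequire example.net/x v0.1.0 // indirect\n")
        f, err := modfile.Parse("go.mod", mod, nil)
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range f.Require {
            // Indirect reports whether the requirement carries the marker comment.
            fmt.Printf("%s %s indirect=%v\n", r.Mod.Path, r.Mod.Version, r.Indirect)
        }
    }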
+ +go mod tidy +cmp go.mod go.mod.direct + +-- go.mod -- +module example.net/m + +go 1.16 + +require example.net/x v0.1.0 //indirect + +replace example.net/x v0.1.0 => ./x +-- go.mod.direct -- +module example.net/m + +go 1.16 + +require example.net/x v0.1.0 + +replace example.net/x v0.1.0 => ./x +-- m.go -- +package m +import _ "example.net/x" + +-- x/go.mod -- +module example.net/x + +go 1.16 +-- x/x.go -- +package x diff --git a/src/cmd/go/testdata/script/mod_init_dep.txt b/src/cmd/go/testdata/script/mod_init_dep.txt index f8cf1d563ab..76b48678604 100644 --- a/src/cmd/go/testdata/script/mod_init_dep.txt +++ b/src/cmd/go/testdata/script/mod_init_dep.txt @@ -1,10 +1,6 @@ env GO111MODULE=on env GOFLAGS=-mod=mod -# modconv uses git directly to examine what old 'go get' would -[!net] skip -[!exec:git] skip - # go mod init should populate go.mod from Gopkg.lock go mod init x stderr 'copying requirements from Gopkg.lock' diff --git a/src/cmd/go/testdata/script/mod_init_glide.txt b/src/cmd/go/testdata/script/mod_init_glide.txt index a351a6ae4bc..373810c7687 100644 --- a/src/cmd/go/testdata/script/mod_init_glide.txt +++ b/src/cmd/go/testdata/script/mod_init_glide.txt @@ -3,6 +3,7 @@ env GO111MODULE=on env GOPROXY=direct +env GOSUMDB= # Regression test for golang.org/issue/32161: # 'go mod init' did not locate tags when resolving a commit to a pseudo-version. diff --git a/src/cmd/go/testdata/script/mod_install_pkg_version.txt b/src/cmd/go/testdata/script/mod_install_pkg_version.txt index e27ebc5cc50..fd02392af1b 100644 --- a/src/cmd/go/testdata/script/mod_install_pkg_version.txt +++ b/src/cmd/go/testdata/script/mod_install_pkg_version.txt @@ -16,7 +16,7 @@ env GO111MODULE=auto cd m cp go.mod go.mod.orig ! go list -m all -stderr '^go: example.com/cmd@v1.1.0-doesnotexist: missing go.sum entry; to add it:\n\tgo mod download example.com/cmd$' +stderr '^go list -m: example.com/cmd@v1.1.0-doesnotexist: missing go.sum entry; to add it:\n\tgo mod download example.com/cmd$' go install example.com/cmd/a@latest cmp go.mod go.mod.orig exists $GOPATH/bin/a$GOEXE @@ -59,17 +59,17 @@ rm $GOPATH/bin env GO111MODULE=on go mod download rsc.io/fortune@v1.0.0 ! go install $GOPATH/pkg/mod/rsc.io/fortune@v1.0.0 -stderr '^go: cannot find main module; see ''go help modules''$' +stderr '^go: go\.mod file not found in current directory or any parent directory; see ''go help modules''$' ! go install ../pkg/mod/rsc.io/fortune@v1.0.0 -stderr '^go: cannot find main module; see ''go help modules''$' +stderr '^go: go\.mod file not found in current directory or any parent directory; see ''go help modules''$' mkdir tmp cd tmp go mod init tmp go mod edit -require=rsc.io/fortune@v1.0.0 ! go install -mod=readonly $GOPATH/pkg/mod/rsc.io/fortune@v1.0.0 -stderr '^go: rsc.io/fortune@v1.0.0: missing go.sum entry; to add it:\n\tgo mod download rsc.io/fortune$' +stderr '^missing go\.sum entry for module providing package rsc\.io/fortune; to add:\n\tgo mod download rsc\.io/fortune$' ! go install -mod=readonly ../../pkg/mod/rsc.io/fortune@v1.0.0 -stderr '^go: rsc.io/fortune@v1.0.0: missing go.sum entry; to add it:\n\tgo mod download rsc.io/fortune$' +stderr '^missing go\.sum entry for module providing package rsc\.io/fortune; to add:\n\tgo mod download rsc\.io/fortune$' go get -d rsc.io/fortune@v1.0.0 go install -mod=readonly $GOPATH/pkg/mod/rsc.io/fortune@v1.0.0 exists $GOPATH/bin/fortune$GOEXE @@ -81,17 +81,17 @@ env GO111MODULE=auto # 'go install pkg@version' reports errors for meta packages, std packages, # and directories. ! 
go install std@v1.0.0 -stderr '^go install std@v1.0.0: argument must be a package path, not a meta-package$' +stderr '^go install: std@v1.0.0: argument must be a package path, not a meta-package$' ! go install fmt@v1.0.0 -stderr '^go install fmt@v1.0.0: argument must not be a package in the standard library$' +stderr '^go install: fmt@v1.0.0: argument must not be a package in the standard library$' ! go install example.com//cmd/a@v1.0.0 -stderr '^go install example.com//cmd/a@v1.0.0: argument must be a clean package path$' +stderr '^go install: example.com//cmd/a@v1.0.0: argument must be a clean package path$' ! go install example.com/cmd/a@v1.0.0 ./x@v1.0.0 -stderr '^go install ./x@v1.0.0: argument must be a package path, not a relative path$' +stderr '^go install: ./x@v1.0.0: argument must be a package path, not a relative path$' ! go install example.com/cmd/a@v1.0.0 $GOPATH/src/x@v1.0.0 -stderr '^go install '$WORK'[/\\]gopath/src/x@v1.0.0: argument must be a package path, not an absolute path$' +stderr '^go install: '$WORK'[/\\]gopath/src/x@v1.0.0: argument must be a package path, not an absolute path$' ! go install example.com/cmd/a@v1.0.0 cmd/...@v1.0.0 -stderr '^go install: package cmd/go not provided by module example.com/cmd@v1.0.0$' +stderr '^package cmd/go not provided by module example.com/cmd@v1.0.0$' # 'go install pkg@version' should accept multiple arguments but report an error # if the version suffixes are different, even if they refer to the same version. @@ -106,19 +106,19 @@ stdout '^example.com/cmd v1.0.0$' env GO111MODULE=auto ! go install example.com/cmd/a@v1.0.0 example.com/cmd/b@latest -stderr '^go install example.com/cmd/b@latest: all arguments must have the same version \(@v1.0.0\)$' +stderr '^go install: example.com/cmd/b@latest: all arguments must have the same version \(@v1.0.0\)$' # 'go install pkg@version' should report an error if the arguments are in # different modules. ! go install example.com/cmd/a@v1.0.0 rsc.io/fortune@v1.0.0 -stderr '^go install: package rsc.io/fortune provided by module rsc.io/fortune@v1.0.0\n\tAll packages must be provided by the same module \(example.com/cmd@v1.0.0\).$' +stderr '^package rsc.io/fortune provided by module rsc.io/fortune@v1.0.0\n\tAll packages must be provided by the same module \(example.com/cmd@v1.0.0\).$' # 'go install pkg@version' should report an error if an argument is not # a main package. ! go install example.com/cmd/a@v1.0.0 example.com/cmd/err@v1.0.0 -stderr '^go install: package example.com/cmd/err is not a main package$' +stderr '^package example.com/cmd/err is not a main package$' # Wildcards should match only main packages. This module has a non-main package # with an error, so we'll know if that gets built. @@ -137,13 +137,13 @@ rm $GOPATH/bin # If a wildcard matches no packages, we should see a warning. ! go install example.com/cmd/nomatch...@v1.0.0 -stderr '^go install example.com/cmd/nomatch\.\.\.@v1.0.0: module example.com/cmd@v1.0.0 found, but does not contain packages matching example.com/cmd/nomatch\.\.\.$' +stderr '^go install: example.com/cmd/nomatch\.\.\.@v1.0.0: module example.com/cmd@v1.0.0 found, but does not contain packages matching example.com/cmd/nomatch\.\.\.$' go install example.com/cmd/a@v1.0.0 example.com/cmd/nomatch...@v1.0.0 stderr '^go: warning: "example.com/cmd/nomatch\.\.\." matched no packages$' # If a wildcard matches only non-main packages, we should see a different warning. go install example.com/cmd/err...@v1.0.0 -stderr '^go: warning: "example.com/cmd/err\.\.\."
matched no main packages$' +stderr '^go: warning: "example.com/cmd/err\.\.\." matched only non-main packages$' # 'go install pkg@version' should report errors if the module contains @@ -159,7 +159,7 @@ cmp stderr exclude-err # 'go install pkg@version' should report an error if the module requires a # higher version of itself. ! go install example.com/cmd/a@v1.0.0-newerself -stderr '^go install example.com/cmd/a@v1.0.0-newerself: version constraints conflict:\n\texample.com/cmd@v1.0.0-newerself requires example.com/cmd@v1.0.0, but example.com/cmd@v1.0.0-newerself is requested$' +stderr '^go install: example.com/cmd/a@v1.0.0-newerself: version constraints conflict:\n\texample.com/cmd@v1.0.0-newerself requires example.com/cmd@v1.0.0, but example.com/cmd@v1.0.0-newerself is requested$' # 'go install pkg@version' will only match a retracted version if it's @@ -192,12 +192,12 @@ package main func main() {} -- replace-err -- -go install example.com/cmd/a@v1.0.0-replace: example.com/cmd@v1.0.0-replace +go install: example.com/cmd/a@v1.0.0-replace (in example.com/cmd@v1.0.0-replace): The go.mod file for the module providing named packages contains one or more replace directives. It must not contain directives that would cause it to be interpreted differently than if it were the main module. -- exclude-err -- -go install example.com/cmd/a@v1.0.0-exclude: example.com/cmd@v1.0.0-exclude +go install: example.com/cmd/a@v1.0.0-exclude (in example.com/cmd@v1.0.0-exclude): The go.mod file for the module providing named packages contains one or more exclude directives. It must not contain directives that would cause it to be interpreted differently than if it were the main module. diff --git a/src/cmd/go/testdata/script/mod_invalid_path.txt b/src/cmd/go/testdata/script/mod_invalid_path.txt index 667828839fd..c8c075daaef 100644 --- a/src/cmd/go/testdata/script/mod_invalid_path.txt +++ b/src/cmd/go/testdata/script/mod_invalid_path.txt @@ -23,6 +23,20 @@ cd $WORK/gopath/src/badname ! go list . stderr 'invalid module path' +# Test that an import path containing an element with a leading dot is valid, +# but such a module path is not. +# Verifies #43985. +cd $WORK/gopath/src/dotname +go list ./.dot +stdout '^example.com/dotname/.dot$' +go list ./use +stdout '^example.com/dotname/use$' +! go list -m example.com/dotname/.dot@latest +stderr '^go list -m: example.com/dotname/.dot@latest: malformed module path "example.com/dotname/.dot": leading dot in path element$' +go get -d example.com/dotname/.dot +go get -d example.com/dotname/use +go mod tidy + -- mod/go.mod -- -- mod/foo.go -- @@ -38,3 +52,13 @@ module .\. -- badname/foo.go -- package badname +-- dotname/go.mod -- +module example.com/dotname + +go 1.16 +-- dotname/.dot/dot.go -- +package dot +-- dotname/use/use.go -- +package use + +import _ "example.com/dotname/.dot" diff --git a/src/cmd/go/testdata/script/mod_invalid_path_dotname.txt b/src/cmd/go/testdata/script/mod_invalid_path_dotname.txt new file mode 100644 index 00000000000..85934332d14 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_invalid_path_dotname.txt @@ -0,0 +1,46 @@ +# Test that an import path containing an element with a leading dot +# in another module is valid. + +# 'go get' works with no version query. +cp go.mod.empty go.mod +go get -d example.com/dotname/.dot +go list -m example.com/dotname +stdout '^example.com/dotname v1.0.0$' + +# 'go get' works with a version query. 
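(Editorial aside, not part of the patch: the asymmetry the dotname tests above and the plus-sign tests just below rely on — some elements are legal in package import paths but not in module paths — is enforced by checks in golang.org/x/mod/module, which the go command vendors. A sketch; the expected results in the comments are taken from the behavior these tests assert, not from documented API guarantees:)

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
    )

    func main() {
        // Leading-dot elements: valid as a package path, invalid as a module path.
        fmt.Println(module.CheckImportPath("example.com/dotname/.dot")) // expected: <nil>
        fmt.Println(module.CheckPath("example.com/dotname/.dot"))       // expected: "leading dot in path element"

        // '+' characters: likewise allowed in package paths, rejected in module paths.
        fmt.Println(module.CheckImportPath("example.net/cmd/x++")) // expected: <nil>
        fmt.Println(module.CheckPath("example.net/bad++"))         // expected: "invalid char '+'"
    }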
+cp go.mod.empty go.mod +go get -d example.com/dotname/.dot@latest +go list -m example.com/dotname +stdout '^example.com/dotname v1.0.0$' + +# 'go get' works on an importing package. +cp go.mod.empty go.mod +go get -d . +go list -m example.com/dotname +stdout '^example.com/dotname v1.0.0$' + +# 'go list' works on the dotted package. +go list example.com/dotname/.dot +stdout '^example.com/dotname/.dot$' + +# 'go list' works on an importing package. +go list . +stdout '^m$' + +# 'go mod tidy' works. +cp go.mod.empty go.mod +go mod tidy +go list -m example.com/dotname +stdout '^example.com/dotname v1.0.0$' + +-- go.mod.empty -- +module m + +go 1.16 +-- go.sum -- +example.com/dotname v1.0.0 h1:Q0JMAn464CnwFVCshs1n4+f5EFiW/eRhnx/fTWjw2Ag= +example.com/dotname v1.0.0/go.mod h1:7K4VLT7QylRI8H7yZwUkeDH2s19wQnyfp/3oBlItWJ0= +-- use.go -- +package use + +import _ "example.com/dotname/.dot" diff --git a/src/cmd/go/testdata/script/mod_invalid_path_plus.txt b/src/cmd/go/testdata/script/mod_invalid_path_plus.txt new file mode 100644 index 00000000000..51dbf936888 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_invalid_path_plus.txt @@ -0,0 +1,36 @@ +# https://golang.org/issue/44776 +# The '+' character should be disallowed in module paths, but allowed in package +# paths within valid modules. + +# 'go list' accepts package paths with pluses. +cp go.mod.orig go.mod +go get -d example.net/cmd +go list example.net/cmd/x++ + +# 'go list -m' rejects module paths with pluses. +! go list -versions -m 'example.net/bad++' +stderr '^go list -m: malformed module path "example.net/bad\+\+": invalid char ''\+''$' + +# 'go get' accepts package paths with pluses. +cp go.mod.orig go.mod +go get -d example.net/cmd/x++ +go list -m example.net/cmd +stdout '^example.net/cmd v0.0.0-00010101000000-000000000000 => ./cmd$' + +-- go.mod.orig -- +module example.com/m + +go 1.16 + +replace ( + example.net/cmd => ./cmd +) + +-- cmd/go.mod -- +module example.net/cmd + +go 1.16 +-- cmd/x++/main.go -- +package main + +func main() {} diff --git a/src/cmd/go/testdata/script/mod_invalid_version.txt b/src/cmd/go/testdata/script/mod_invalid_version.txt index 43b95643561..34d9c47674e 100644 --- a/src/cmd/go/testdata/script/mod_invalid_version.txt +++ b/src/cmd/go/testdata/script/mod_invalid_version.txt @@ -19,7 +19,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 \(replaced by \./\..\): parsing ../go.mod: '$WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "14c0d48ead0c" invalid: must be of the form v1.2.3' +stderr 'go list -m: example.com@v0.0.0 \(replaced by \./\..\): parsing ../go.mod: '$WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "14c0d48ead0c" invalid: must be of the form v1.2.3' cd .. go list -m golang.org/x/text stdout 'golang.org/x/text v0.1.1-0.20170915032832-14c0d48ead0c' @@ -30,7 +30,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text/unicode@v0.0.0-20170915032832-14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text/unicode@v0.0.0-20170915032832-14c0d48ead0c: invalid version: missing golang.org/x/text/unicode/go.mod at revision 14c0d48ead0c' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text/unicode@v0.0.0-20170915032832-14c0d48ead0c: invalid version: missing golang.org/x/text/unicode/go.mod at revision 14c0d48ead0c' cd .. ! 
go list -m golang.org/x/text stderr 'golang.org/x/text/unicode@v0.0.0-20170915032832-14c0d48ead0c: invalid version: missing golang.org/x/text/unicode/go.mod at revision 14c0d48ead0c' @@ -47,7 +47,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v2.1.1-0.20170915032832-14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 \(replaced by \./\.\.\): parsing ../go.mod: '$WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "v2.1.1-0.20170915032832-14c0d48ead0c" invalid: should be v0 or v1, not v2' +stderr 'go list -m: example.com@v0.0.0 \(replaced by \./\.\.\): parsing ../go.mod: '$WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "v2.1.1-0.20170915032832-14c0d48ead0c" invalid: should be v0 or v1, not v2' cd .. ! go list -m golang.org/x/text stderr $WORK'/gopath/src/go.mod:5: require golang.org/x/text: version "v2.1.1-0.20170915032832-14c0d48ead0c" invalid: should be v0 or v1, not v2' @@ -57,7 +57,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0 cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0: invalid pseudo-version: revision is shorter than canonical \(14c0d48ead0c\)' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0: invalid pseudo-version: revision is shorter than canonical \(14c0d48ead0c\)' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0: invalid pseudo-version: revision is shorter than canonical \(14c0d48ead0c\)' @@ -67,7 +67,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0cd47e3104ada247d91be04afc7a5a cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0cd47e3104ada247d91be04afc7a5a: invalid pseudo-version: revision is longer than canonical \(14c0d48ead0c\)' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0cd47e3104ada247d91be04afc7a5a: invalid pseudo-version: revision is longer than canonical \(14c0d48ead0c\)' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0cd47e3104ada247d91be04afc7a5a: invalid pseudo-version: revision is longer than canonical \(14c0d48ead0c\)' @@ -77,7 +77,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v0.1.1-0.20190915032832-14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20190915032832-14c0d48ead0c: invalid pseudo-version: does not match version-control timestamp \(expected 20170915032832\)' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20190915032832-14c0d48ead0c: invalid pseudo-version: does not match version-control timestamp \(expected 20170915032832\)' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v0.1.1-0.20190915032832-14c0d48ead0c: invalid pseudo-version: does not match version-control timestamp \(expected 20170915032832\)' @@ -87,7 +87,7 @@ stderr 'golang.org/x/text@v0.1.1-0.20190915032832-14c0d48ead0c: invalid pseudo-v go mod edit -replace golang.org/x/text@v0.1.1-0.20190915032832-14c0d48ead0c=golang.org/x/text@14c0d48ead0c cd outside ! 
go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20190915032832-14c0d48ead0c: invalid pseudo-version: does not match version-control timestamp \(expected 20170915032832\)' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20190915032832-14c0d48ead0c: invalid pseudo-version: does not match version-control timestamp \(expected 20170915032832\)' cd .. go list -m golang.org/x/text stdout 'golang.org/x/text v0.1.1-0.20190915032832-14c0d48ead0c => golang.org/x/text v0.1.1-0.20170915032832-14c0d48ead0c' @@ -97,7 +97,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v1.999.999-0.20170915032832-14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v1.999.999-0.20170915032832-14c0d48ead0c: invalid pseudo-version: preceding tag \(v1.999.998\) not found' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v1.999.999-0.20170915032832-14c0d48ead0c: invalid pseudo-version: preceding tag \(v1.999.998\) not found' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v1.999.999-0.20170915032832-14c0d48ead0c: invalid pseudo-version: preceding tag \(v1.999.998\) not found' @@ -109,7 +109,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v1.0.0-20170915032832-14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v1.0.0-20170915032832-14c0d48ead0c: invalid pseudo-version: major version without preceding tag must be v0, not v1' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v1.0.0-20170915032832-14c0d48ead0c: invalid pseudo-version: major version without preceding tag must be v0, not v1' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v1.0.0-20170915032832-14c0d48ead0c: invalid pseudo-version: major version without preceding tag must be v0, not v1' @@ -120,7 +120,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v0.0.0-0.20170915032832-14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.0.0-0.20170915032832-14c0d48ead0c: invalid pseudo-version: version before v0.0.0 would have negative patch number' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.0.0-0.20170915032832-14c0d48ead0c: invalid pseudo-version: version before v0.0.0 would have negative patch number' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v0.0.0-0.20170915032832-14c0d48ead0c: invalid pseudo-version: version before v0.0.0 would have negative patch number' @@ -130,7 +130,7 @@ stderr 'golang.org/x/text@v0.0.0-0.20170915032832-14c0d48ead0c: invalid pseudo-v go mod edit -replace golang.org/x/text@v0.0.0-0.20170915032832-14c0d48ead0c=golang.org/x/text@v0.0.0-20170915032832-14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.0.0-0.20170915032832-14c0d48ead0c: invalid pseudo-version: version before v0.0.0 would have negative patch number' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.0.0-0.20170915032832-14c0d48ead0c: invalid pseudo-version: version before v0.0.0 would have negative patch number' cd .. 
go list -m golang.org/x/text stdout 'golang.org/x/text v0.0.0-0.20170915032832-14c0d48ead0c => golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c' @@ -153,7 +153,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v0.2.1-0.20170915032832-14c0d48ead0c cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.2.1-0.20170915032832-14c0d48ead0c: invalid pseudo-version: revision 14c0d48ead0c is not a descendent of preceding tag \(v0.2.0\)' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.2.1-0.20170915032832-14c0d48ead0c: invalid pseudo-version: revision 14c0d48ead0c is not a descendent of preceding tag \(v0.2.0\)' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v0.2.1-0.20170915032832-14c0d48ead0c: invalid pseudo-version: revision 14c0d48ead0c is not a descendent of preceding tag \(v0.2.0\)' @@ -163,7 +163,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v0.2.1-0.20171213102548-c4d099d611ac cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.2.1-0.20171213102548-c4d099d611ac: invalid pseudo-version: tag \(v0.2.0\) found on revision c4d099d611ac is already canonical, so should not be replaced with a pseudo-version derived from that tag' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.2.1-0.20171213102548-c4d099d611ac: invalid pseudo-version: tag \(v0.2.0\) found on revision c4d099d611ac is already canonical, so should not be replaced with a pseudo-version derived from that tag' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v0.2.1-0.20171213102548-c4d099d611ac: invalid pseudo-version: tag \(v0.2.0\) found on revision c4d099d611ac is already canonical, so should not be replaced with a pseudo-version derived from that tag' @@ -173,7 +173,7 @@ cp go.mod.orig go.mod go mod edit -require golang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0c+incompatible cd outside ! go list -m golang.org/x/text -stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0c\+incompatible: invalid version: \+incompatible suffix not allowed: major version v0 is compatible' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0c\+incompatible: invalid version: \+incompatible suffix not allowed: major version v0 is compatible' cd .. ! go list -m golang.org/x/text stderr 'golang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0c\+incompatible: invalid version: \+incompatible suffix not allowed: major version v0 is compatible' @@ -194,7 +194,7 @@ cp go.mod.orig go.mod go mod edit -require github.com/pierrec/lz4@v2.0.9-0.20190209155647-9a39efadad3d+incompatible cd outside ! go list -m github.com/pierrec/lz4 -stderr 'go: example.com@v0.0.0 requires\n\tgithub.com/pierrec/lz4@v2.0.9-0.20190209155647-9a39efadad3d\+incompatible: invalid version: \+incompatible suffix not allowed: module contains a go.mod file, so semantic import versioning is required' +stderr 'go list -m: example.com@v0.0.0 requires\n\tgithub.com/pierrec/lz4@v2.0.9-0.20190209155647-9a39efadad3d\+incompatible: invalid version: \+incompatible suffix not allowed: module contains a go.mod file, so semantic import versioning is required' cd .. ! 
go list -m github.com/pierrec/lz4 stderr 'github.com/pierrec/lz4@v2.0.9-0.20190209155647-9a39efadad3d\+incompatible: invalid version: \+incompatible suffix not allowed: module contains a go.mod file, so semantic import versioning is required' diff --git a/src/cmd/go/testdata/script/mod_lazy_consistency.txt b/src/cmd/go/testdata/script/mod_lazy_consistency.txt new file mode 100644 index 00000000000..1bf3e31bfe0 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_lazy_consistency.txt @@ -0,0 +1,95 @@ +# If the root requirements in a lazy module are inconsistent +# (for example, due to a bad hand-edit or git merge), +# they can go unnoticed as long as the module with the violated +# requirement is not used. +# When we load a package from that module, we should spot-check its +# requirements and either emit an error or update the go.mod file. + +cp go.mod go.mod.orig + + +# If we load package x from x.1, we only check the requirements of x, +# which are fine: loading succeeds. + +go list -deps ./usex +stdout '^example.net/x$' +cmp go.mod go.mod.orig + + +# However, if we load needx2, we should load the requirements of needx2. +# Those requirements indicate x.2, not x.1, so the module graph is +# inconsistent and needs to be fixed. + +! go list -deps ./useneedx2 +stderr '^go: updates to go.mod needed; to update it:\n\tgo mod tidy$' + +! go list -deps example.net/needx2 +stderr '^go: updates to go.mod needed; to update it:\n\tgo mod tidy$' + + +# The command printed in the error message should fix the problem. + +go mod tidy +go list -deps ./useneedx2 +stdout '^example.net/m/useneedx2$' +stdout '^example.net/needx2$' +stdout '^example.net/x$' + +go list -m all +stdout '^example.net/needx2 v0\.1\.0 ' +stdout '^example.net/x v0\.2\.0 ' + + +-- go.mod -- +module example.net/m + +go 1.17 + +require ( + example.net/needx2 v0.1.0 + example.net/x v0.1.0 +) + +replace ( + example.net/needx2 v0.1.0 => ./needx2.1 + example.net/x v0.1.0 => ./x.1 + example.net/x v0.2.0 => ./x.2 +) +-- useneedx2/useneedx2.go -- +package useneedx2 + +import _ "example.net/needx2" +-- usex/usex.go -- +package usex + +import _ "example.net/x" + +-- x.1/go.mod -- +module example.net/x + +go 1.17 +-- x.1/x.go -- +package x + +-- x.2/go.mod -- +module example.net/x + +go 1.17 +-- x.2/x.go -- +package x + +const AddedInV2 = true + +-- needx2.1/go.mod -- +module example.net/needx2 + +go 1.17 + +require example.net/x v0.2.0 +-- needx2.1/needx2.go -- +// Package needx2 needs x v0.2.0 or higher. +package needx2 + +import "example.net/x" + +var _ = x.AddedInV2 diff --git a/src/cmd/go/testdata/script/mod_lazy_downgrade.txt b/src/cmd/go/testdata/script/mod_lazy_downgrade.txt index 1e84820f811..2f815fef22f 100644 --- a/src/cmd/go/testdata/script/mod_lazy_downgrade.txt +++ b/src/cmd/go/testdata/script/mod_lazy_downgrade.txt @@ -1,5 +1,5 @@ # This test illustrates the interaction between lazy loading and downgrading in -# 'go get. +# 'go get'. # The package import graph used in this test looks like: # @@ -46,7 +46,7 @@ go list -m all # outside of the deepening scan should not affect the downgrade. cp go.mod.orig go.mod -go mod edit -go=1.16 +go mod edit -go=1.17 go list -m all stdout '^example.com/a v0.1.0 ' @@ -59,12 +59,50 @@ stdout '^example.com/a v0.1.0 ' stdout '^example.com/b v0.2.0 ' stdout '^example.com/c v0.1.0 ' +# At this point, b.2 is still an explicit root, so its dependency on c +# is still tracked, and it will still be downgraded away if we remove c. +# ('go get' never makes a root into a non-root. Only 'go mod tidy' does that.)
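mod_lazy_consistency.txt above exercises the new spot-check: in a go 1.17 ("lazy") main module, an inconsistent root requirement is only detected once a package from the affected module is actually loaded. The check itself is internal to cmd/go; the following is only an illustrative sketch, using golang.org/x/mod's modfile and semver packages, of how a main module's roots can be compared against what a dependency's go.mod demands. The file paths follow the test fixtures above:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/mod/modfile"
        "golang.org/x/mod/semver"
    )

    func parse(path string) *modfile.File {
        data, err := os.ReadFile(path)
        if err != nil {
            panic(err)
        }
        f, err := modfile.Parse(path, data, nil)
        if err != nil {
            panic(err)
        }
        return f
    }

    func main() {
        mainMod := parse("go.mod")      // the main module's roots
        dep := parse("needx2.1/go.mod") // one dependency's requirements

        roots := make(map[string]string)
        for _, r := range mainMod.Require {
            roots[r.Mod.Path] = r.Mod.Version
        }
        // Report require lines in the dependency that exceed the main module's
        // roots: the inconsistency that makes 'go list' demand 'go mod tidy'.
        for _, r := range dep.Require {
            if v, ok := roots[r.Mod.Path]; ok && semver.Compare(v, r.Mod.Version) < 0 {
                fmt.Printf("root %s@%s is below %s required by %s\n",
                    r.Mod.Path, v, r.Mod.Version, dep.Module.Mod.Path)
            }
        }
    }

Run against the fixtures above, this would flag example.net/x: the root says v0.1.0 while needx2 requires v0.2.0.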
+ go get -d example.com/c@none go list -m all -! stdout '^example.com/a ' # TODO(#36460): example.com/a v0.1.0 -! stdout '^example.com/b ' # TODO(#36460): example.com/b v0.1.0 +! stdout '^example.com/a ' +! stdout '^example.com/b ' ! stdout '^example.com/c ' + +# This time, we drop the explicit 'b' root by downgrading it to v0.1.0 +# (the version required by a.1) and running 'go mod tidy'. +# It is still selected at v0.1.0 (as a dependency of a), +# but its dependency on c is now pruned from the module graph, so it doesn't +# result in any downgrades to b or a if we run 'go get c@none'. + +cp go.mod.orig go.mod +go mod edit -go=1.17 + +go list -m all +stdout '^example.com/a v0.1.0 ' +stdout '^example.com/b v0.3.0 ' +stdout '^example.com/c v0.2.0 ' + +go get -d example.com/c@v0.1.0 example.com/b@v0.1.0 +go list -m all +stdout '^example.com/a v0.1.0 ' +stdout '^example.com/b v0.1.0 ' +stdout '^example.com/c v0.1.0 ' + +go mod tidy +go list -m all +stdout '^example.com/a v0.1.0 ' +stdout '^example.com/b v0.1.0 ' +! stdout '^example.com/c ' + +go get -d example.com/c@none +go list -m all +stdout '^example.com/a v0.1.0' +stdout '^example.com/b v0.1.0' +! stdout '^example.com/c ' + + -- go.mod -- module example.com/lazy @@ -91,7 +129,7 @@ import _ "example.com/a" -- a/go.mod -- module example.com/a -go 1.15 +go 1.17 require example.com/b v0.1.0 -- a/a.go -- @@ -104,7 +142,7 @@ import _ "example.com/b" -- b1/go.mod -- module example.com/b -go 1.15 +go 1.17 require example.com/c v0.1.0 -- b1/b.go -- @@ -116,7 +154,7 @@ import _ "example.com/c" -- b2/go.mod -- module example.com/b -go 1.15 +go 1.17 require example.com/c v0.1.0 -- b2/b.go -- @@ -128,7 +166,7 @@ import _ "example.com/c" -- b3/go.mod -- module example.com/b -go 1.15 +go 1.17 require example.com/c v0.2.0 -- b3/b.go -- @@ -140,6 +178,6 @@ import _ "example.com/c" -- c/go.mod -- module example.com/c -go 1.15 +go 1.17 -- c/c.go -- package c diff --git a/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt b/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt index 4ad8cbf8ee2..3dc1515df26 100644 --- a/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt +++ b/src/cmd/go/testdata/script/mod_lazy_import_allmod.txt @@ -53,8 +53,8 @@ stdout '^c v0.1.0 ' cp m.go.orig m.go cp go.mod.orig go.mod -go mod edit -go=1.16 -go mod edit -go=1.16 go.mod.new +go mod edit -go=1.17 +go mod edit -go=1.17 go.mod.new cp go.mod go.mod.orig go mod tidy @@ -63,14 +63,15 @@ cmp go.mod.orig go.mod go list -m all stdout '^a v0.1.0 ' stdout '^b v0.1.0 ' -stdout '^c v0.1.0 ' # TODO(#36460): This should be pruned out. +! stdout '^c ' -# After adding a new import of b/y, -# the import of c from b/y should again resolve to the version required by b. +# After adding a new direct import of b/y, +# the existing version of b should be promoted to a root, +# bringing the version of c required by b into the build list.
cp m.go.new m.go go mod tidy -cmp go.mod.new go.mod +cmp go.mod.lazy go.mod go list -m all stdout '^a v0.1.0 ' @@ -124,6 +125,23 @@ require ( b v0.1.0 ) +replace ( + a v0.1.0 => ./a1 + b v0.1.0 => ./b1 + c v0.1.0 => ./c1 + c v0.2.0 => ./c2 +) +-- go.mod.lazy -- +module m + +go 1.17 + +require ( + a v0.1.0 + b v0.1.0 + c v0.1.0 // indirect +) + replace ( a v0.1.0 => ./a1 b v0.1.0 => ./b1 @@ -133,7 +151,7 @@ replace ( -- a1/go.mod -- module a -go 1.16 +go 1.17 require b v0.1.0 -- a1/a.go -- @@ -145,7 +163,7 @@ import _ "b/x" -- b1/go.mod -- module b -go 1.16 +go 1.17 require c v0.1.0 -- b1/x/x.go -- @@ -161,7 +179,7 @@ func CVersion() string { -- c1/go.mod -- module c -go 1.16 +go 1.17 -- c1/c.go -- package c diff --git a/src/cmd/go/testdata/script/mod_lazy_new_import.txt b/src/cmd/go/testdata/script/mod_lazy_new_import.txt index 02935bf2365..86b14b64b6f 100644 --- a/src/cmd/go/testdata/script/mod_lazy_new_import.txt +++ b/src/cmd/go/testdata/script/mod_lazy_new_import.txt @@ -5,7 +5,7 @@ # # lazy ---- a/x ---- b # \ -# ---- a/y ---- c +# ---- a/y (new) ---- c # # Where a/x and x/y are disjoint packages, but both contained in module a. # @@ -32,17 +32,32 @@ cmp go.mod go.mod.old cp lazy.go.new lazy.go go list all go list -m all -stdout '^example.com/c v0.1.0' # not v0.2.0 as would be be resolved by 'latest' +stdout '^example.com/c v0.1.0' # not v0.2.0 as would be resolved by 'latest' cmp go.mod go.mod.old -# TODO(#36460): +# Now, we repeat the test with a lazy main module. cp lazy.go.old lazy.go -cp go.mod.old go.mod -go mod edit -go=1.16 +cp go.mod.117 go.mod + +# Before adding a new import, the go.mod file should +# enumerate modules for all packages already imported. +go list all +cmp go.mod go.mod.117 # When a new import is found, we should perform a deepening scan of the existing # dependencies and add a requirement on the version required by those # dependencies — not re-resolve 'latest'. +cp lazy.go.new lazy.go + +! go list all +stderr '^go: updates to go.mod needed; to update it:\n\tgo mod tidy$' + +go mod tidy +go list all +go list -m all +stdout '^example.com/c v0.1.0' # not v0.2.0 as would be resolved by 'latest' + +cmp go.mod go.mod.new -- go.mod -- @@ -52,6 +67,39 @@ go 1.15 require example.com/a v0.1.0 +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b + example.com/c v0.1.0 => ./c1 + example.com/c v0.2.0 => ./c2 +) +-- go.mod.117 -- +module example.com/lazy + +go 1.17 + +require ( + example.com/a v0.1.0 + example.com/b v0.1.0 // indirect +) + +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b + example.com/c v0.1.0 => ./c1 + example.com/c v0.2.0 => ./c2 +) +-- go.mod.new -- +module example.com/lazy + +go 1.17 + +require ( + example.com/a v0.1.0 + example.com/b v0.1.0 // indirect + example.com/c v0.1.0 // indirect +) + replace ( example.com/a v0.1.0 => ./a example.com/b v0.1.0 => ./b diff --git a/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt b/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt index 9cdfad79f6a..7d07eb60aa6 100644 --- a/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt +++ b/src/cmd/go/testdata/script/mod_lazy_test_horizon.txt @@ -32,12 +32,12 @@ stdout '^c v0.2.0 ' # but the irrelevant dependency on c v0.2.0 should be pruned out, # leaving only the relevant dependency on c v0.1.0. 
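These scripts assert on the text output of 'go list -m all'. When experimenting with the pruning behavior outside the test harness, it can be easier to consume the build list as JSON. A small sketch, assuming only the documented Path, Version, and Indirect fields of 'go list -m -json' output:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "io"
        "os/exec"
    )

    func main() {
        // 'go list -m -json all' prints a stream of JSON objects, one per
        // module in the build list; under lazy loading the pruned modules
        // simply no longer appear.
        out, err := exec.Command("go", "list", "-m", "-json", "all").Output()
        if err != nil {
            panic(err)
        }
        dec := json.NewDecoder(bytes.NewReader(out))
        for {
            var m struct {
                Path, Version string
                Indirect      bool
            }
            if err := dec.Decode(&m); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            fmt.Printf("%s %s indirect=%v\n", m.Path, m.Version, m.Indirect)
        }
    }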
-go mod edit -go=1.16 +go mod edit -go=1.17 go list -m c -stdout '^c v0.2.0' # TODO(#36460): v0.1.0 +stdout '^c v0.1.0' [!short] go test -v x -[!short] stdout ' c v0.2.0$' # TODO(#36460): v0.1.0 +[!short] stdout ' c v0.1.0$' -- m.go -- package m @@ -66,7 +66,7 @@ replace ( -- a1/go.mod -- module a -go 1.16 +go 1.17 require b v0.1.0 -- a1/a.go -- @@ -78,7 +78,7 @@ import _ "b" -- b1/go.mod -- module b -go 1.16 +go 1.17 require c v0.2.0 -- b1/b.go -- @@ -97,7 +97,7 @@ func TestCVersion(t *testing.T) { -- c1/go.mod -- module c -go 1.16 +go 1.17 -- c1/c.go -- package c @@ -105,7 +105,7 @@ const Version = "v0.1.0" -- c2/go.mod -- module c -go 1.16 +go 1.17 -- c2/c.go -- package c @@ -113,7 +113,7 @@ const Version = "v0.2.0" -- x1/go.mod -- module x -go 1.16 +go 1.17 require c v0.1.0 -- x1/x.go -- diff --git a/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt b/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt index ca6c55040eb..722712d1f2c 100644 --- a/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt +++ b/src/cmd/go/testdata/script/mod_lazy_test_of_test_dep.txt @@ -21,12 +21,13 @@ cp go.mod go.mod.old go mod tidy cmp go.mod go.mod.old + # In Go 1.15 mode, 'go list -m all' includes modules needed by the # transitive closure of tests of dependencies of tests of dependencies of …. go list -m all -stdout 'example.com/b v0.1.0' -stdout 'example.com/c v0.1.0' +stdout '^example.com/b v0.1.0 ' +stdout '^example.com/c v0.1.0 ' cmp go.mod go.mod.old # 'go test' (or equivalent) of any such dependency, no matter how remote, does @@ -36,18 +37,24 @@ go list -test -deps example.com/a stdout example.com/b ! stdout example.com/c -[!short] go test -c example.com/a +[!short] go test -c -o $devnull example.com/a [!short] cmp go.mod go.mod.old go list -test -deps example.com/b stdout example.com/c -[!short] go test -c example.com/b +[!short] go test -c -o $devnull example.com/b [!short] cmp go.mod go.mod.old -# TODO(#36460): +go mod edit -go=1.17 a/go.mod +go mod edit -go=1.17 b1/go.mod +go mod edit -go=1.17 b2/go.mod +go mod edit -go=1.17 c1/go.mod +go mod edit -go=1.17 c2/go.mod +go mod edit -go=1.17 -# After changing to 'go 1.16` uniformly, 'go list -m all' should prune out + +# After changing to 'go 1.17' uniformly, 'go list -m all' should prune out # example.com/c, because it is not imported by any package (or test of a package) # transitively imported by the main module. # @@ -62,10 +69,66 @@ stdout example.com/c # version of its module. # However, if we reach c by running successive tests starting from the main -# module, we should end up with exactly the version require by c, with an update +# module, we should end up with exactly the version required by b, with an update # to the go.mod file as soon as we test a test dependency that is not itself in # "all". +cp go.mod go.mod.117 +go mod tidy +cmp go.mod go.mod.117 + +go list -m all +stdout '^example.com/b v0.1.0 ' +! stdout '^example.com/c ' + +# 'go test' of a package (transitively) imported by the main module +# should work without changes to the go.mod file. + +go list -test -deps example.com/a +stdout example.com/b +! stdout example.com/c + +[!short] go test -c -o $devnull example.com/a + +# However, 'go test' of a package that is itself a dependency should require an +# update to the go.mod file. +! go list -test -deps example.com/b + + # TODO(#36460): The hint here is wrong. We should suggest + # 'go get -t example.com/b@v0.1.0' instead of 'go mod tidy'.
+stderr '^go: updates to go\.mod needed; to update it:\n\tgo mod tidy$' + +[!short] ! go test -c -o $devnull example.com/b +[!short] stderr '^go: updates to go\.mod needed; to update it:\n\tgo mod tidy$' + +go get -t example.com/b@v0.1.0 +go list -test -deps example.com/b +stdout example.com/c + +[!short] go test -c -o $devnull example.com/b + +# The update should bring the version required by b, not the latest version of c. + +go list -m example.com/c +stdout '^example.com/c v0.1.0 ' + +cmp go.mod go.mod.b + + +# We should reach the same state if we arrive at it via `go test -mod=mod`. + +cp go.mod.117 go.mod + +[short] go list -mod=mod -test -deps example.com/a +[!short] go test -mod=mod -c -o $devnull example.com/a + +[short] go list -mod=mod -test -deps example.com/b +[!short] go test -mod=mod -c -o $devnull example.com/b + +cmp go.mod go.mod.b + + + -- go.mod -- module example.com/lazy @@ -73,6 +136,23 @@ go 1.15 require example.com/a v0.1.0 +replace ( + example.com/a v0.1.0 => ./a + example.com/b v0.1.0 => ./b1 + example.com/b v0.2.0 => ./b2 + example.com/c v0.1.0 => ./c1 + example.com/c v0.2.0 => ./c2 +) +-- go.mod.b -- +module example.com/lazy + +go 1.17 + +require ( + example.com/a v0.1.0 + example.com/b v0.1.0 // indirect +) + replace ( example.com/a v0.1.0 => ./a example.com/b v0.1.0 => ./b1 diff --git a/src/cmd/go/testdata/script/mod_list.txt b/src/cmd/go/testdata/script/mod_list.txt index 1ba6d7c910e..239c7caa4a2 100644 --- a/src/cmd/go/testdata/script/mod_list.txt +++ b/src/cmd/go/testdata/script/mod_list.txt @@ -29,7 +29,8 @@ stdout 'v1.3.0.*mod[\\/]rsc.io[\\/]sampler@v1.3.1 .*[\\/]v1.3.1.mod => v1.3.1.*s go list std stdout ^math/big -# rsc.io/quote/buggy should be listable as a package +# rsc.io/quote/buggy should be listable as a package, +# even though it is only a test. go list -mod=mod rsc.io/quote/buggy # rsc.io/quote/buggy should not be listable as a module diff --git a/src/cmd/go/testdata/script/mod_list_deprecated.txt b/src/cmd/go/testdata/script/mod_list_deprecated.txt new file mode 100644 index 00000000000..f0ecbba2cea --- /dev/null +++ b/src/cmd/go/testdata/script/mod_list_deprecated.txt @@ -0,0 +1,52 @@ +# 'go list pkg' does not show deprecation. +go list example.com/deprecated/a +stdout '^example.com/deprecated/a$' + +# 'go list -m' does not show deprecation. +go list -m example.com/deprecated/a +stdout '^example.com/deprecated/a v1.9.0$' + +# 'go list -m -versions' does not show deprecation. +go list -m -versions example.com/deprecated/a +stdout '^example.com/deprecated/a v1.0.0 v1.9.0$' + +# 'go list -m -u' shows deprecation. +go list -m -u example.com/deprecated/a +stdout '^example.com/deprecated/a v1.9.0 \(deprecated\)$' + +# 'go list -m -u -f' exposes the deprecation message. +go list -m -u -f {{.Deprecated}} example.com/deprecated/a +stdout '^in example.com/deprecated/a@v1.9.0$' + +# This works even if we use an old version that does not have the deprecation +# message in its go.mod file. +go get -d example.com/deprecated/a@v1.0.0 +! grep Deprecated: $WORK/gopath/pkg/mod/cache/download/example.com/deprecated/a/@v/v1.0.0.mod +go list -m -u -f {{.Deprecated}} example.com/deprecated/a +stdout '^in example.com/deprecated/a@v1.9.0$' + +# 'go list -m -u' does not show deprecation for the main module. +go list -m -u +! 
stdout deprecated +go list -m -u -f '{{if not .Deprecated}}ok{{end}}' +stdout ok + +# 'go list -m -u' does not show a deprecation message for a module that is not +# deprecated at the latest version, even if it is deprecated at the current +# version. +go list -m -u example.com/undeprecated +stdout '^example.com/undeprecated v1.0.0 \[v1.0.1\]$' +-- go.mod -- +// Deprecated: main module is deprecated, too! +module example.com/use + +go 1.17 + +require ( + example.com/deprecated/a v1.9.0 + example.com/undeprecated v1.0.0 +) +-- go.sum -- +example.com/deprecated/a v1.9.0 h1:pRyvBIZheJpQVVnNW4Fdg8QuoqDgtkCreqZZbASV3BE= +example.com/deprecated/a v1.9.0/go.mod h1:Z1uUVshSY9kh6l/2hZ8oA9SBviX2yfaeEpcLDz6AZwY= +example.com/undeprecated v1.0.0/go.mod h1:1qiRbdA9VzJXDqlG26Y41O5Z7YyO+jAD9do8XCZQ+Gg= diff --git a/src/cmd/go/testdata/script/mod_list_deprecated_replace.txt b/src/cmd/go/testdata/script/mod_list_deprecated_replace.txt new file mode 100644 index 00000000000..48b991fc473 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_list_deprecated_replace.txt @@ -0,0 +1,68 @@ +# When all versions are replaced, we should not look up a deprecation message. +# We will still look up a deprecation message for the replacement. +cp go.mod.allreplaced go.mod +go list -m -u -f '{{.Path}}@{{.Version}} <{{.Deprecated}}>{{with .Replace}} => {{.Path}}@{{.Version}} <{{.Deprecated}}>{{end}}' all +stdout '^example.com/deprecated/a@v1.0.0 <> => example.com/deprecated/b@v1.0.0 <in example.com/deprecated/b@v1.9.0>$' + +# When one version is replaced, we should see a deprecation message. +cp go.mod.onereplaced go.mod +go list -m -u -f '{{.Path}}@{{.Version}} <{{.Deprecated}}>{{with .Replace}} => {{.Path}}@{{.Version}} <{{.Deprecated}}>{{end}}' all +stdout '^example.com/deprecated/a@v1.0.0 <in example.com/deprecated/a@v1.9.0> => example.com/deprecated/b@v1.0.0 <in example.com/deprecated/b@v1.9.0>$' + +# If the replacement is a directory, we won't look that up. +cp go.mod.dirreplacement go.mod +go list -m -u -f '{{.Path}}@{{.Version}} <{{.Deprecated}}>{{with .Replace}} => {{.Path}}@{{.Version}} <{{.Deprecated}}>{{end}}' all +stdout '^example.com/deprecated/a@v1.0.0 <> => ./a@ <>$' + +# If the latest version of the replacement is replaced, we'll use the content +# from that replacement.
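A deprecation is just a '// Deprecated:' comment on the module directive, as in the b/go.mod fixture below, and 'go list -m -u' is the supported way to surface it. For tooling that wants to read the message straight out of a go.mod file, golang.org/x/mod/modfile exposes it as well. A sketch, assuming the Module.Deprecated field present in x/mod releases that understand Go 1.17 deprecations:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/mod/modfile"
    )

    func main() {
        data, err := os.ReadFile("go.mod")
        if err != nil {
            panic(err)
        }
        f, err := modfile.Parse("go.mod", data, nil)
        if err != nil {
            panic(err)
        }
        // Deprecated holds the text after "Deprecated:" in the comment
        // attached to the module directive, or "" if there is none.
        if f.Module != nil && f.Module.Deprecated != "" {
            fmt.Printf("module %s is deprecated: %s\n", f.Module.Mod.Path, f.Module.Deprecated)
        }
    }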
+cp go.mod.latestreplaced go.mod +go list -m -u -f '{{.Path}}@{{.Version}} <{{.Deprecated}}>{{with .Replace}} => {{.Path}}@{{.Version}} <{{.Deprecated}}>{{end}}' all +stdout '^example.com/deprecated/a@v1.0.0 <> => example.com/deprecated/b@v1.0.0 <in ./b>$' + +-- go.mod.allreplaced -- +module m + +go 1.17 + +require example.com/deprecated/a v1.0.0 + +replace example.com/deprecated/a => example.com/deprecated/b v1.0.0 +-- go.mod.onereplaced -- +module m + +go 1.17 + +require example.com/deprecated/a v1.0.0 + +replace example.com/deprecated/a v1.0.0 => example.com/deprecated/b v1.0.0 +-- go.mod.dirreplacement -- +module m + +go 1.17 + +require example.com/deprecated/a v1.0.0 + +replace example.com/deprecated/a => ./a +-- go.mod.latestreplaced -- +module m + +go 1.17 + +require example.com/deprecated/a v1.0.0 + +replace ( + example.com/deprecated/a => example.com/deprecated/b v1.0.0 + example.com/deprecated/b v1.9.0 => ./b +) +-- go.sum -- +example.com/deprecated/b v1.0.0/go.mod h1:b19J9ywRGviY7Nq4aJ1WBJ+A7qUlEY9ihp22yI4/F6M= +-- a/go.mod -- +module example.com/deprecated/a + +go 1.17 +-- b/go.mod -- +// Deprecated: in ./b +module example.com/deprecated/b + +go 1.17 diff --git a/src/cmd/go/testdata/script/mod_list_retract.txt b/src/cmd/go/testdata/script/mod_list_retract.txt index 3ba53bc5969..4b133485152 100644 --- a/src/cmd/go/testdata/script/mod_list_retract.txt +++ b/src/cmd/go/testdata/script/mod_list_retract.txt @@ -29,12 +29,12 @@ go list -m -retracted -f '{{with .Retracted}}retracted{{end}}' example.com/retra go list -m -f '{{with .Retracted}}retracted{{end}}' example.com/retract@v1.0.0-unused ! stdout . -# 'go list -m -retracted mod@version' shows an error if the go.mod that should -# contain the retractions is not available. -! go list -m -retracted example.com/retract/missingmod@v1.0.0 -stderr '^go list -m: loading module retractions for example.com/retract/missingmod@v1.0.0: .*404 Not Found$' -go list -e -m -retracted -f '{{.Error.Err}}' example.com/retract/missingmod@v1.0.0 -stdout '^loading module retractions for example.com/retract/missingmod@v1.0.0: .*404 Not Found$' +# 'go list -m -retracted mod@version' does not show an error if the module +# that would contain the retraction is unavailable. See #45305. +go list -m -retracted -f '{{.Path}} {{.Version}} {{.Error}}' example.com/retract/missingmod@v1.0.0 +stdout '^example.com/retract/missingmod v1.0.0 <nil>$' +exists $GOPATH/pkg/mod/cache/download/example.com/retract/missingmod/@v/v1.9.0.info +! exists $GOPATH/pkg/mod/cache/download/example.com/retract/missingmod/@v/v1.9.0.mod # 'go list -m -retracted mod@version' shows retractions. go list -m -retracted example.com/retract@v1.0.0-unused diff --git a/src/cmd/go/testdata/script/mod_list_std.txt b/src/cmd/go/testdata/script/mod_list_std.txt index baf7908ab93..f4e0433d8a0 100644 --- a/src/cmd/go/testdata/script/mod_list_std.txt +++ b/src/cmd/go/testdata/script/mod_list_std.txt @@ -48,18 +48,20 @@ stdout ^vendor/golang.org/x/crypto/internal/subtle ! stdout ^golang\.org/x # Within the std module, the dependencies of the non-vendored packages within -# std should appear to come from modules, but they should be loaded from the -# vendor directory (just like ordinary vendored module dependencies). +# std should appear to be packages beginning with 'vendor/', not 'golang.org/…' +# module dependencies. go list all -stdout ^golang.org/x/ +! stdout ^golang.org/x/ ! stdout ^std/ ! stdout ^cmd/ -! stdout ^vendor/ +stdout ^vendor/ go list -deps -f '{{if not .Standard}}{{.ImportPath}}{{end}}' std -!
stdout ^vendor/golang.org/x/net/http2/hpack -stdout ^golang.org/x/net/http2/hpack +! stdout . + +# However, the 'golang.org/…' module dependencies should resolve to those same +# directories. go list -f '{{.Dir}}' golang.org/x/net/http2/hpack stdout $GOROOT[/\\]src[/\\]vendor diff --git a/src/cmd/go/testdata/script/mod_list_sums.txt b/src/cmd/go/testdata/script/mod_list_sums.txt new file mode 100644 index 00000000000..86c528f8290 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_list_sums.txt @@ -0,0 +1,32 @@ +# https://golang.org/issue/41297: 'go list -m' should not require go.sum with +# -versions or when all args are version queries. + +go mod init m +go mod edit -require=rsc.io/quote@v1.5.1 + +go list -m -mod=readonly rsc.io/quote@latest +stdout '^rsc\.io/quote v1\.5\.2$' +! stderr . + +go list -m -mod=readonly -versions rsc.io/quote +stdout 'rsc\.io/quote v1\.0\.0 .* v1\.5\.3-pre1$' +! stderr . + +# Incidentally fetching the required version of a module records its checksum, +# just because it happens to be in the build list, and recording the checksum +# triggers an error under -mod=readonly. +# +# TODO(#41297): This should not be an error. +! go list -m -mod=readonly rsc.io/quote@ all' should also fail. ! go list -m example.com/printversion@v1.0.0 all -stderr 'go: cannot match "all": working directory is not part of a module' +stderr 'go: cannot match "all": go.mod file not found in current directory or any parent directory; see ''go help modules''$' +! stdout 'example.com/version' + +# 'go list -m <mods>' should fail if any of the mods lacks an explicit version. +! go list -m example.com/printversion +stderr 'go: cannot match "example.com/printversion" without -versions or an explicit version: go.mod file not found in current directory or any parent directory; see ''go help modules''$' ! stdout 'example.com/version' # 'go list -m' with wildcards should fail. Wildcards match modules in the # build list, so they aren't meaningful outside a module. ! go list -m ... -stderr 'go: cannot match "...": working directory is not part of a module' +stderr 'go: cannot match "...": go.mod file not found in current directory or any parent directory; see ''go help modules''$' ! go list -m rsc.io/quote/... -stderr 'go: cannot match "rsc.io/quote/...": working directory is not part of a module' +stderr 'go: cannot match "rsc.io/quote/...": go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go clean' should skip the current directory if it isn't in a module. @@ -76,20 +81,20 @@ go clean -n # 'go mod graph' should fail, since there's no module graph. ! go mod graph -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go mod why' should fail, since there is no main module to depend on anything. ! go mod why -m example.com/version -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go mod edit', 'go mod tidy', and 'go mod fmt' should fail: # there is no go.mod file to edit. ! go mod tidy -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' ! go mod edit -fmt -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' !
go mod edit -require example.com/version@v1.0.0 -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go mod download' without arguments should report an error. @@ -104,33 +109,33 @@ exists $GOPATH/pkg/mod/cache/download/example.com/printversion/@v/v1.0.0.zip # 'go mod download all' should fail. "all" is not meaningful outside of a module. ! go mod download all -stderr 'go: cannot match "all": working directory is not part of a module' +stderr 'go: cannot match "all": go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go mod vendor' should fail: it starts by clearing the existing vendor # directory, and we don't know where that is. ! go mod vendor -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go mod verify' should fail: we have no modules to verify. ! go mod verify -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go get' without arguments implicitly operates on the main module, and thus # should fail. ! go get -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' ! go get -u -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' ! go get -u ./needmod -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go get -u all' upgrades the transitive import graph of the main module, # which is empty. ! go get -u all -stderr 'go get: cannot match "all": working directory is not part of a module' +stderr '^go get: cannot match "all": go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go get' should check the proposed module graph for consistency, # even though we won't write it anywhere. @@ -147,16 +152,16 @@ exists $GOPATH/pkg/mod/example.com/version@v1.0.0 # 'go build' without arguments implicitly operates on the current directory, and should fail. cd needmod ! go build -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' cd .. # 'go build' of a non-module directory should fail too. ! go build ./needmod -stderr 'cannot find main module' +stderr '^go: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go build' of source files should fail if they import anything outside std. ! go build -n ./needmod/needmod.go -stderr '^needmod[/\\]needmod.go:10:2: no required module provides package example.com/version: working directory is not part of a module$' +stderr '^needmod[/\\]needmod.go:10:2: no required module provides package example.com/version: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go build' of source files should succeed if they do not import anything outside std. go build -n -o ignore ./stdonly/stdonly.go @@ -164,6 +169,8 @@ go build -n -o ignore ./stdonly/stdonly.go # 'go build' should succeed for standard-library packages. go build -n fmt +# 'go build' should use the latest version of the Go language. 
+go build ./newgo/newgo.go # 'go doc' without arguments implicitly operates on the current directory, and should fail. # TODO(golang.org/issue/32027): currently, it succeeds. @@ -179,7 +186,7 @@ go doc fmt # 'go doc' should fail for a package path outside a module. ! go doc example.com/version -stderr 'doc: no required module provides package example.com/version: working directory is not part of a module' +stderr 'doc: no required module provides package example.com/version: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go install' with a version should succeed if all constraints are met. # See mod_install_pkg_version. @@ -194,24 +201,20 @@ stderr '^go install: version is required when current directory is not in a modu # 'go install' should fail if a source file imports a package that must be # resolved to a module. ! go install ./needmod/needmod.go -stderr 'needmod[/\\]needmod.go:10:2: no required module provides package example.com/version: working directory is not part of a module' +stderr 'needmod[/\\]needmod.go:10:2: no required module provides package example.com/version: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go install' should succeed with a package in GOROOT. go install cmd/addr2line ! stderr . -# 'go run' with a verison should fail due to syntax. -! go run example.com/printversion@v1.0.0 -stderr 'can only use path@version syntax with' - # 'go run' should fail if a package argument must be resolved to a module. ! go run example.com/printversion -stderr '^no required module provides package example.com/printversion: working directory is not part of a module$' +stderr '^no required module provides package example.com/printversion: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go run' should fail if a source file imports a package that must be # resolved to a module. ! go run ./needmod/needmod.go -stderr '^needmod[/\\]needmod.go:10:2: no required module provides package example.com/version: working directory is not part of a module$' +stderr '^needmod[/\\]needmod.go:10:2: no required module provides package example.com/version: go.mod file not found in current directory or any parent directory; see ''go help modules''$' # 'go fmt' should be able to format files outside of a module. @@ -326,3 +329,15 @@ func Test(t *testing.T) { fmt.Println("stdonly was tested") } +-- newgo/newgo.go -- +// Package newgo requires Go 1.14 or newer. +package newgo + +import "io" + +const C = 299_792_458 + +type ReadWriteCloser interface { + io.ReadCloser + io.WriteCloser +} diff --git a/src/cmd/go/testdata/script/mod_prefer_compatible.txt b/src/cmd/go/testdata/script/mod_prefer_compatible.txt index aa6260f63c8..1b408c3e9e9 100644 --- a/src/cmd/go/testdata/script/mod_prefer_compatible.txt +++ b/src/cmd/go/testdata/script/mod_prefer_compatible.txt @@ -23,8 +23,8 @@ stdout '^github.com/russross/blackfriday v1\.' go list -m github.com/russross/blackfriday@upgrade stdout '^github.com/russross/blackfriday v1\.' -go list -m github.com/russross/blackfriday@patch -stdout '^github.com/russross/blackfriday v1\.' +! go list -m github.com/russross/blackfriday@patch +stderr '^go list -m: github.com/russross/blackfriday@patch: can''t query version "patch" of module github.com/russross/blackfriday: no existing version is required$' # If we're fetching directly from version control, ignored +incompatible # versions should also be omitted by 'go list'. 
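The mod_prefer_compatible.txt change above pins down two rules: version queries prefer compatible (non-'+incompatible') releases, and '@patch' fails when no version of the module is already required. As a toy illustration of the compatibility preference only, here is a sketch using golang.org/x/mod/semver on a made-up version list; the real query logic lives in cmd/go/internal/modload:

    package main

    import (
        "fmt"

        "golang.org/x/mod/semver"
    )

    // preferCompatible picks the highest valid version from vs, preferring any
    // compatible version over a "+incompatible" one, loosely mirroring the
    // behavior the test above locks in.
    func preferCompatible(vs []string) string {
        best := ""
        for _, v := range vs {
            if !semver.IsValid(v) {
                continue
            }
            switch {
            case best == "":
                best = v
            case semver.Build(best) == "+incompatible" && semver.Build(v) == "":
                best = v // any compatible version beats an incompatible one
            case semver.Build(best) == semver.Build(v) && semver.Compare(v, best) > 0:
                best = v
            }
        }
        return best
    }

    func main() {
        fmt.Println(preferCompatible([]string{"v2.0.0+incompatible", "v1.4.0", "v1.5.2"})) // v1.5.2
    }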
diff --git a/src/cmd/go/testdata/script/mod_proxy_https.txt b/src/cmd/go/testdata/script/mod_proxy_https.txt index a23090cd0ad..a5e28dd0b97 100644 --- a/src/cmd/go/testdata/script/mod_proxy_https.txt +++ b/src/cmd/go/testdata/script/mod_proxy_https.txt @@ -10,6 +10,7 @@ stderr 'invalid proxy URL.*proxydir' # GOPROXY HTTPS paths may elide the "https://" prefix. # (See golang.org/issue/32191.) env GOPROXY=proxy.golang.org +env GOSUMDB= go list -versions -m golang.org/x/text -- go.mod -- diff --git a/src/cmd/go/testdata/script/mod_readonly.txt b/src/cmd/go/testdata/script/mod_readonly.txt index 176be729679..d05ad2a3174 100644 --- a/src/cmd/go/testdata/script/mod_readonly.txt +++ b/src/cmd/go/testdata/script/mod_readonly.txt @@ -89,7 +89,7 @@ stderr '^no required module provides package rsc.io/quote; to add it:\n\tgo get -- go.mod -- module m -go 1.20 +go 1.16 -- x.go -- package x @@ -104,7 +104,7 @@ require ( -- go.mod.redundant -- module m -go 1.20 +go 1.16 require ( rsc.io/quote v1.5.2 @@ -114,7 +114,7 @@ require ( -- go.mod.indirect -- module m -go 1.20 +go 1.16 require ( rsc.io/quote v1.5.2 // indirect @@ -124,7 +124,7 @@ require ( -- go.mod.untidy -- module m -go 1.20 +go 1.16 require ( rsc.io/sampler v1.3.0 // indirect diff --git a/src/cmd/go/testdata/script/mod_replace_gopkgin.txt b/src/cmd/go/testdata/script/mod_replace_gopkgin.txt index df752d9716e..d24f37b7880 100644 --- a/src/cmd/go/testdata/script/mod_replace_gopkgin.txt +++ b/src/cmd/go/testdata/script/mod_replace_gopkgin.txt @@ -35,7 +35,7 @@ go list -m gopkg.in/src-d/go-git.v4 # A mismatched gopkg.in path should not be able to replace a different major version. cd ../3-to-gomod-4 ! go list -m gopkg.in/src-d/go-git.v3 -stderr '^go: gopkg\.in/src-d/go-git\.v3@v3\.2\.0 \(replaced by gopkg\.in/src-d/go-git\.v3@v3\.0\.0-20190801152248-0d1a009cbb60\): version "v3\.0\.0-20190801152248-0d1a009cbb60" invalid: go\.mod has non-\.\.\.\.v3 module path "gopkg\.in/src-d/go-git\.v4" at revision 0d1a009cbb60$' +stderr '^go list -m: gopkg\.in/src-d/go-git\.v3@v3\.2\.0 \(replaced by gopkg\.in/src-d/go-git\.v3@v3\.0\.0-20190801152248-0d1a009cbb60\): version "v3\.0\.0-20190801152248-0d1a009cbb60" invalid: go\.mod has non-\.\.\.\.v3 module path "gopkg\.in/src-d/go-git\.v4" at revision 0d1a009cbb60$' -- 4-to-4/go.mod -- module golang.org/issue/34254 diff --git a/src/cmd/go/testdata/script/mod_require_exclude.txt b/src/cmd/go/testdata/script/mod_require_exclude.txt index 9156d4ce5d5..0946dbf0bb3 100644 --- a/src/cmd/go/testdata/script/mod_require_exclude.txt +++ b/src/cmd/go/testdata/script/mod_require_exclude.txt @@ -7,16 +7,27 @@ cp go.mod go.mod.orig ! go list -mod=readonly -m all stderr '^go: ignoring requirement on excluded version rsc.io/sampler v1\.99\.99$' -stderr '^go: updates to go.mod needed, disabled by -mod=readonly$' +stderr '^go: updates to go.mod needed, disabled by -mod=readonly; to update it:\n\tgo mod tidy$' ! stdout '^rsc.io/sampler v1.99.99' cmp go.mod go.mod.orig ! go list -mod=vendor -m rsc.io/sampler stderr '^go: ignoring requirement on excluded version rsc.io/sampler v1\.99\.99$' -stderr '^go list -m: module rsc.io/sampler: can''t resolve module using the vendor directory\n\t\(Use -mod=mod or -mod=readonly to bypass\.\)$' +stderr '^go: updates to go.mod needed, disabled by -mod=vendor; to update it:\n\tgo mod tidy$' ! stdout '^rsc.io/sampler v1.99.99' cmp go.mod go.mod.orig +# The failure message should be clear when -mod=vendor is implicit. + +go mod edit -go=1.14 +! 
go list -m rsc.io/sampler +stderr '^go: ignoring requirement on excluded version rsc.io/sampler v1\.99\.99$' +stderr '^go: updates to go.mod needed, disabled by -mod=vendor\n\t\(Go version in go.mod is at least 1.14 and vendor directory exists\.\)\n\tto update it:\n\tgo mod tidy$' +! stdout '^rsc.io/sampler v1.99.99' +go mod edit -go=1.13 +cmp go.mod go.mod.orig + + # With the selected version excluded, commands that load only modules should # drop the excluded module. @@ -58,7 +69,11 @@ module x go 1.13 exclude rsc.io/sampler v1.99.99 + require rsc.io/sampler v1.99.99 +-- vendor/modules.txt -- +# rsc.io/sampler v1.99.99 +## explicit -- go.moddrop -- module x diff --git a/src/cmd/go/testdata/script/mod_retention.txt b/src/cmd/go/testdata/script/mod_retention.txt index a4441c4b3c7..0e639db551d 100644 --- a/src/cmd/go/testdata/script/mod_retention.txt +++ b/src/cmd/go/testdata/script/mod_retention.txt @@ -62,9 +62,10 @@ cmp go.mod go.mod.tidy # A missing "go" version directive should be added. # However, that should not remove other redundant requirements. +# In fact, it may *add* redundant requirements due to activating lazy loading. cp go.mod.nogo go.mod go list -mod=mod all -cmpenv go.mod go.mod.currentgo +cmpenv go.mod go.mod.addedgo -- go.mod.tidy -- @@ -133,12 +134,13 @@ require ( rsc.io/sampler v1.3.0 // indirect rsc.io/testonly v1.0.0 // indirect ) --- go.mod.currentgo -- +-- go.mod.addedgo -- module m go $goversion require ( + golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect rsc.io/quote v1.5.2 rsc.io/sampler v1.3.0 // indirect rsc.io/testonly v1.0.0 // indirect ) diff --git a/src/cmd/go/testdata/script/mod_retract_fix_version.txt b/src/cmd/go/testdata/script/mod_retract_fix_version.txt new file mode 100644 index 00000000000..e45758b6270 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_retract_fix_version.txt @@ -0,0 +1,48 @@ +# retract must not be used without a module directive. +! go list -m all +stderr 'go.mod:3: no module directive found, so retract cannot be used$' + +# Commands that update go.mod should fix non-canonical versions in +# retract directives. +# Verifies #44494. +go mod edit -module=rsc.io/quote/v2 +! go list -m all +stderr '^go: updates to go.mod needed; to update it:\n\tgo mod tidy$' +go mod tidy +go list -m all +cmp go.mod go.mod.want + +# If a retracted version doesn't match the module's major version suffix, +# an error should be reported. +! go mod edit -retract=v3.0.1 +stderr '^go mod: -retract=v3.0.1: version "v3.0.1" invalid: should be v2, not v3$' +cp go.mod.mismatch-v2 go.mod +! go list -m all +stderr 'go.mod:3: retract rsc.io/quote/v2: version "v3.0.1" invalid: should be v2, not v3$' + +cp go.mod.mismatch-v1 go.mod +! go list -m all +stderr 'go.mod:3: retract rsc.io/quote: version "v3.0.1" invalid: should be v0 or v1, not v3$' + +-- go.mod -- +go 1.16 + +retract latest +-- go.mod.want -- +go 1.16 + +retract v2.0.1 + +module rsc.io/quote/v2 +-- go.mod.mismatch-v2 -- +go 1.16 + +retract v3.0.1 + +module rsc.io/quote/v2 +-- go.mod.mismatch-v1 -- +go 1.16 + +retract v3.0.1 + +module rsc.io/quote diff --git a/src/cmd/go/testdata/script/mod_retract_rationale.txt b/src/cmd/go/testdata/script/mod_retract_rationale.txt index 4d3a3d67c65..823c384e488 100644 --- a/src/cmd/go/testdata/script/mod_retract_rationale.txt +++ b/src/cmd/go/testdata/script/mod_retract_rationale.txt @@ -29,7 +29,7 @@ cmp stdout multiline # 'go get' should omit long messages.
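Retractions, like deprecations, are plain go.mod syntax, and the rationale is the comment attached to the retract directive (the message the 'go get' tests below truncate or omit). A sketch of listing them with golang.org/x/mod/modfile, assuming its Retract field with the Low/High/Rationale shape that x/mod uses to model retract lines:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/mod/modfile"
    )

    func main() {
        data, err := os.ReadFile("go.mod")
        if err != nil {
            panic(err)
        }
        f, err := modfile.Parse("go.mod", data, nil)
        if err != nil {
            panic(err)
        }
        for _, r := range f.Retract {
            // A single retracted version is stored with Low == High;
            // Rationale is the comment text shown (or elided) by 'go get'.
            fmt.Printf("retract [%s, %s]: %q\n", r.Low, r.High, r.Rationale)
        }
    }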
go get -d example.com/retract/rationale@v1.0.0-long -stderr '^go: warning: example.com/retract/rationale@v1.0.0-long: retracted by module author: \(rationale omitted: too long\)' +stderr '^go: warning: example.com/retract/rationale@v1.0.0-long: retracted by module author: \(message omitted: too long\)' # 'go list' should show the full message. go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale @@ -38,7 +38,7 @@ stdout '^\[lo{500}ng\]$' # 'go get' should omit messages with unprintable characters. go get -d example.com/retract/rationale@v1.0.0-unprintable -stderr '^go: warning: example.com/retract/rationale@v1.0.0-unprintable: retracted by module author: \(rationale omitted: contains non-printable characters\)' +stderr '^go: warning: example.com/retract/rationale@v1.0.0-unprintable: retracted by module author: \(message omitted: contains non-printable characters\)' # 'go list' should show the full message. go list -m -retracted -f '{{.Retracted}}' example.com/retract/rationale diff --git a/src/cmd/go/testdata/script/mod_retract_replace.txt b/src/cmd/go/testdata/script/mod_retract_replace.txt index 770aea41a59..9cd714739ab 100644 --- a/src/cmd/go/testdata/script/mod_retract_replace.txt +++ b/src/cmd/go/testdata/script/mod_retract_replace.txt @@ -5,8 +5,10 @@ go get -d # The latest version, v1.9.0, is not available on the proxy. -! go list -m -retracted example.com/retract/missingmod -stderr '^go list -m: loading module retractions for example.com/retract/missingmod@v1.0.0: .*404 Not Found$' +go list -m -retracted example.com/retract/missingmod +stdout '^example.com/retract/missingmod v1.0.0$' +exists $GOPATH/pkg/mod/cache/download/example.com/retract/missingmod/@v/v1.9.0.info +! exists $GOPATH/pkg/mod/cache/download/example.com/retract/missingmod/@v/v1.9.0.mod # If we replace that version, we should see retractions. go mod edit -replace=example.com/retract/missingmod@v1.9.0=./missingmod-v1.9.0 diff --git a/src/cmd/go/testdata/script/mod_retract_versions.txt b/src/cmd/go/testdata/script/mod_retract_versions.txt new file mode 100644 index 00000000000..012fa15f420 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_retract_versions.txt @@ -0,0 +1,22 @@ +# https://golang.org/issue/44296: the --versions flag should not affect +# the version reported by 'go list' in case of retractions. + +env FMT='{{.Path}}{{with .Error}}: {{printf "%q" .Err}}{{end}} {{printf "%q" .Version}}{{with .Versions}} {{.}}{{end}}' + +go list -m -e -f $FMT example.com/retract/self/pseudo +stdout '^example.com/retract/self/pseudo: "module example.com/retract/self/pseudo: not a known dependency" ""$' + +go list -m -e -f $FMT example.com/retract/self/pseudo@latest +stdout '^example.com/retract/self/pseudo: "module example.com/retract/self/pseudo: no matching versions for query \\"latest\\"" "latest"$' + + +go list -m -e -f $FMT --versions example.com/retract/self/pseudo +stdout '^example.com/retract/self/pseudo ""$' + +go list -m -e -f $FMT --versions example.com/retract/self/pseudo@latest +stdout '^example.com/retract/self/pseudo: "module example.com/retract/self/pseudo: no matching versions for query \\"latest\\"" "latest"$' + +-- go.mod -- +module test + +go 1.17 diff --git a/src/cmd/go/testdata/script/mod_run_nonmain.txt b/src/cmd/go/testdata/script/mod_run_nonmain.txt new file mode 100644 index 00000000000..036755d2d1a --- /dev/null +++ b/src/cmd/go/testdata/script/mod_run_nonmain.txt @@ -0,0 +1,18 @@ +! go run $PWD +! 
stderr 'no packages loaded' +stderr '^package example.net/nonmain is not a main package$' + +! go run . +stderr '^package example.net/nonmain is not a main package$' + +! go run ./... +stderr '^go: warning: "\./\.\.\." matched only non-main packages$' +stderr '^go run: no packages loaded from \./\.\.\.$' + +-- go.mod -- +module example.net/nonmain + +go 1.17 +-- nonmain.go -- +// Package nonmain is not a main package. +package nonmain diff --git a/src/cmd/go/testdata/script/mod_run_pkg_version.txt b/src/cmd/go/testdata/script/mod_run_pkg_version.txt new file mode 100644 index 00000000000..e921fab5085 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_run_pkg_version.txt @@ -0,0 +1,103 @@ +# This test checks the behavior of 'go run' with a 'cmd@version' argument. +# Most of 'go run' is covered in other tests. +# mod_install_pkg_version covers most of the package loading functionality. +# This test focuses on 'go run' behavior specific to this mode. +[short] skip + +# 'go run pkg@version' works outside a module. +env GO111MODULE=auto +go run example.com/cmd/a@v1.0.0 +stdout '^a@v1.0.0$' + + +# 'go run pkg@version' reports an error if modules are disabled. +env GO111MODULE=off +! go run example.com/cmd/a@v1.0.0 +stderr '^go: modules disabled by GO111MODULE=off; see ''go help modules''$' +env GO111MODULE=on + + +# 'go run pkg@version' ignores go.mod in the current directory. +cd m +cp go.mod go.mod.orig +! go list -m all +stderr '^go list -m: example.com/cmd@v1.1.0-doesnotexist: missing go.sum entry; to add it:\n\tgo mod download example.com/cmd$' +go run example.com/cmd/a@v1.0.0 +stdout '^a@v1.0.0$' +cmp go.mod go.mod.orig +cd .. + + +# 'go run pkg@version' works on a module that doesn't have a go.mod file +# and with a module whose go.mod file has missing requirements. +# With a proxy, the two cases are indistinguishable. +go run rsc.io/fortune@v1.0.0 +stderr '^go: found rsc.io/quote in rsc.io/quote v1.5.2$' +stderr '^Hello, world.$' + + +# 'go run pkg@version' should report an error if pkg is not a main package. +! go run example.com/cmd/err@v1.0.0 +stderr '^package example.com/cmd/err is not a main package$' + + +# 'go run pkg@version' should report errors if the module contains +# replace or exclude directives. +go mod download example.com/cmd@v1.0.0-replace +! go run example.com/cmd/a@v1.0.0-replace +cmp stderr replace-err + +go mod download example.com/cmd@v1.0.0-exclude +! go run example.com/cmd/a@v1.0.0-exclude +cmp stderr exclude-err + + +# 'go run dir@version' works like a normal 'go run' command if +# dir is a relative or absolute path. +go mod download rsc.io/fortune@v1.0.0 +! go run $GOPATH/pkg/mod/rsc.io/fortune@v1.0.0 +stderr '^go: go\.mod file not found in current directory or any parent directory; see ''go help modules''$' +! go run ../pkg/mod/rsc.io/fortune@v1.0.0 +stderr '^go: go\.mod file not found in current directory or any parent directory; see ''go help modules''$' +mkdir tmp +cd tmp +go mod init tmp +go mod edit -require=rsc.io/fortune@v1.0.0 +! go run -mod=readonly $GOPATH/pkg/mod/rsc.io/fortune@v1.0.0 +stderr '^missing go\.sum entry for module providing package rsc\.io/fortune; to add:\n\tgo mod download rsc\.io/fortune$' +! go run -mod=readonly ../../pkg/mod/rsc.io/fortune@v1.0.0 +stderr '^missing go\.sum entry for module providing package rsc\.io/fortune; to add:\n\tgo mod download rsc\.io/fortune$' +cd .. +rm tmp + + +# 'go run' does not interpret @version arguments after the first.
+go run example.com/cmd/a@v1.0.0 example.com/doesnotexist@v1.0.0 +stdout '^a@v1.0.0$' + + +# 'go run pkg@version' succeeds when -mod=readonly is set explicitly. +# Verifies #43278. +go run -mod=readonly example.com/cmd/a@v1.0.0 +stdout '^a@v1.0.0$' + +-- m/go.mod -- +module m + +go 1.16 + +require example.com/cmd v1.1.0-doesnotexist +-- x/x.go -- +package main + +func main() {} +-- replace-err -- +go run: example.com/cmd/a@v1.0.0-replace (in example.com/cmd@v1.0.0-replace): + The go.mod file for the module providing named packages contains one or + more replace directives. It must not contain directives that would cause + it to be interpreted differently than if it were the main module. +-- exclude-err -- +go run: example.com/cmd/a@v1.0.0-exclude (in example.com/cmd@v1.0.0-exclude): + The go.mod file for the module providing named packages contains one or + more exclude directives. It must not contain directives that would cause + it to be interpreted differently than if it were the main module. diff --git a/src/cmd/go/testdata/script/mod_run_pkgerror.txt b/src/cmd/go/testdata/script/mod_run_pkgerror.txt new file mode 100644 index 00000000000..48f900dd346 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_run_pkgerror.txt @@ -0,0 +1,32 @@ +# https://golang.org/issue/39986: files reported as invalid by go/build should +# be listed in InvalidGoFiles. + +go list -e -f '{{.Incomplete}}{{"\n"}}{{.Error}}{{"\n"}}{{.InvalidGoFiles}}{{"\n"}}' . +stdout '^true\nfound packages m \(m\.go\) and main \(main\.go\) in '$PWD'\n\[main.go\]\n' + + +# https://golang.org/issue/45827: 'go run .' should report the same package +# errors as 'go build' and 'go list'. + +! go build +stderr '^found packages m \(m\.go\) and main \(main\.go\) in '$PWD'$' + +! go list . +stderr '^found packages m \(m\.go\) and main \(main\.go\) in '$PWD'$' + +! go run . +! stderr 'no packages loaded' +stderr '^found packages m \(m\.go\) and main \(main\.go\) in '$PWD'$' + +! go run ./... +! stderr 'no packages loaded' +stderr '^found packages m \(m\.go\) and main \(main\.go\) in '$PWD'$' + +-- go.mod -- +module m + +go 1.17 +-- m.go -- +package m +-- main.go -- +package main diff --git a/src/cmd/go/testdata/script/mod_std_vendor.txt b/src/cmd/go/testdata/script/mod_std_vendor.txt index fb954d74edb..c3cde52953f 100644 --- a/src/cmd/go/testdata/script/mod_std_vendor.txt +++ b/src/cmd/go/testdata/script/mod_std_vendor.txt @@ -36,11 +36,11 @@ stderr 'use of vendored package' # When run within the 'std' module, 'go list -test' should report vendored -# transitive dependencies at their original module paths. +# transitive dependencies at their vendored paths. cd $GOROOT/src go list -test -f '{{range .Deps}}{{.}}{{"\n"}}{{end}}' net/http -stdout ^golang.org/x/net/http2/hpack -! stdout ^vendor/golang.org/x/net/http2/hpack +! stdout ^golang.org/x/net/http2/hpack +stdout ^vendor/golang.org/x/net/http2/hpack -- go.mod -- module m diff --git a/src/cmd/go/testdata/script/mod_sum_readonly.txt b/src/cmd/go/testdata/script/mod_sum_readonly.txt index 57c5bbeefdf..113f13ea390 100644 --- a/src/cmd/go/testdata/script/mod_sum_readonly.txt +++ b/src/cmd/go/testdata/script/mod_sum_readonly.txt @@ -4,7 +4,7 @@ env GO111MODULE=on # When a sum is needed to load the build list, we get an error for the # specific module. The .mod file is not downloaded, and go.sum is not written. ! 
go list -m all -stderr '^go: rsc.io/quote@v1.5.2: missing go.sum entry; to add it:\n\tgo mod download rsc.io/quote$' +stderr '^go list -m: rsc.io/quote@v1.5.2: missing go.sum entry; to add it:\n\tgo mod download rsc.io/quote$' ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod ! exists go.sum @@ -12,7 +12,7 @@ stderr '^go: rsc.io/quote@v1.5.2: missing go.sum entry; to add it:\n\tgo mod dow # we should see the same error. cp go.sum.h2only go.sum ! go list -m all -stderr '^go: rsc.io/quote@v1.5.2: missing go.sum entry; to add it:\n\tgo mod download rsc.io/quote$' +stderr '^go list -m: rsc.io/quote@v1.5.2: missing go.sum entry; to add it:\n\tgo mod download rsc.io/quote$' ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod cmp go.sum go.sum.h2only rm go.sum @@ -21,7 +21,7 @@ rm go.sum cp go.mod go.mod.orig go mod edit -replace rsc.io/quote@v1.5.2=rsc.io/quote@v1.5.1 ! go list -m all -stderr '^go: rsc.io/quote@v1.5.2 \(replaced by rsc.io/quote@v1.5.1\): missing go.sum entry; to add it:\n\tgo mod download rsc.io/quote$' +stderr '^go list -m: rsc.io/quote@v1.5.2 \(replaced by rsc.io/quote@v1.5.1\): missing go.sum entry; to add it:\n\tgo mod download rsc.io/quote$' ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.1.mod ! exists go.sum cp go.mod.orig go.mod diff --git a/src/cmd/go/testdata/script/mod_sumdb.txt b/src/cmd/go/testdata/script/mod_sumdb.txt index 9a688e1461c..fa3483c5cb1 100644 --- a/src/cmd/go/testdata/script/mod_sumdb.txt +++ b/src/cmd/go/testdata/script/mod_sumdb.txt @@ -37,3 +37,9 @@ go get -d rsc.io/fortune -- go.mod.orig -- module m + +go 1.16 +-- m.go -- +package m + +import _ "rsc.io/quote" diff --git a/src/cmd/go/testdata/script/mod_sumdb_cache.txt b/src/cmd/go/testdata/script/mod_sumdb_cache.txt index 2937b2e4dcd..1b38475fb5e 100644 --- a/src/cmd/go/testdata/script/mod_sumdb_cache.txt +++ b/src/cmd/go/testdata/script/mod_sumdb_cache.txt @@ -43,12 +43,5 @@ env GOPROXY=$proxy/sumdb-504 ! go get -d rsc.io/quote@v1.5.2 stderr 504 -# but -insecure bypasses the checksum lookup entirely -env GOINSECURE= -go get -d -insecure rsc.io/quote@v1.5.2 - -# and then it is in go.sum again -go get -d rsc.io/quote@v1.5.2 - -- go.mod.orig -- module m diff --git a/src/cmd/go/testdata/script/mod_test.txt b/src/cmd/go/testdata/script/mod_test.txt index 50f00355c17..76f1d7a9a4d 100644 --- a/src/cmd/go/testdata/script/mod_test.txt +++ b/src/cmd/go/testdata/script/mod_test.txt @@ -60,6 +60,8 @@ go list -test -- a/go.mod.empty -- module example.com/user/a +go 1.11 + -- a/a.go -- package a diff --git a/src/cmd/go/testdata/script/mod_tidy_convergence.txt b/src/cmd/go/testdata/script/mod_tidy_convergence.txt new file mode 100644 index 00000000000..22c8fc66c57 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_tidy_convergence.txt @@ -0,0 +1,188 @@ +# This test demonstrates a simple case in which 'go mod tidy' may resolve a +# missing package, only to remove that package when resolving its dependencies. +# +# If we naively iterate 'go mod tidy' until the dependency graph converges, this +# scenario may fail to converge. + +# The import graph used in this test looks like: +# +# m --- x +# | +# x_test --- y +# +# The module dependency graph of m is initially empty. 
+# Modules x and y look like: +# +# x.1 (provides package x that imports y, but does not depend on module y) +# +# x.2-pre (no dependencies, but does not provide package x) +# +# y.1 (no dependencies, but provides package y) +# +# y.2 --- x.2-pre (provides package y) +# +# +# When we resolve the missing import of y in x_test, we add y@latest — which is +# y.2, not y.1 — as a new dependency. That upgrades x to x.2-pre, which +# removes package x (and also the need for module y). We can then safely remove +# the dependency on module y, because nothing imports package y any more! +# +# We might be tempted to remove the dependency on module x for the same reason: +# it no longer provides any imported package. However, that would cause 'go mod +# tidy -e' to become unstable: with x.2-pre out of the way, we could once again +# resolve the missing import of package x by re-adding x.1. + +cp go.mod go.mod.orig + +# 'go mod tidy' without -e should fail without modifying go.mod, +# because it cannot resolve x and y simultaneously. +! go mod tidy + +cmp go.mod go.mod.orig + +stderr '^go: found example\.net/y in example\.net/y v0.2.0$' +stderr '^go: finding module for package example\.net/x$' + + # TODO: This error message should be clearer — it doesn't indicate why v0.2.0-pre is required. +stderr '^example\.net/m imports\n\texample\.net/x: package example\.net/x provided by example\.net/x at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$' + + +# 'go mod tidy -e' should follow upgrades to try to resolve the modules that it +# can, and then stop. When we resolve example.net/y, we upgrade example.net/x +# to v0.2.0-pre. At that version, package x no longer exists and no longer +# imports package y, so the import of x should be left unsatisfied and the +# existing dependency on example.net/x removed. +# +# TODO(bcmills): It would be even better if we could keep the original +# dependency on example.net/x v0.1.0, but I don't see a way to do that without +# making the algorithm way too complicated. (We would have to detect that the +# new dependency on example.net/y interferes with the package that caused us to +# add that dependency in the first place, and back out that part of the change +# without also backing out any other needed changes.) + +go mod tidy -e +cmp go.mod go.mod.tidye +stderr '^go: found example\.net/y in example\.net/y v0.2.0$' + + # TODO: This error message should be clearer — it doesn't indicate why v0.2.0-pre is required. +stderr '^example\.net/m imports\n\texample\.net/x: package example\.net/x provided by example\.net/x at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$' + + +# Since we attempt to resolve the dependencies of package x whenever we add x itself, +# this end state is stable. + +go mod tidy -e +cmp go.mod go.mod.tidye + + +# An explicit 'go get' with the correct versions should allow 'go mod tidy' to +# succeed and remain stable. y.1 does not upgrade x, and can therefore be used +# with it. + +go get -d example.net/x@v0.1.0 example.net/y@v0.1.0 +go mod tidy +cmp go.mod go.mod.postget + + +# The 'tidy' logic for a lazy main module is somewhat different from that for an +# eager main module, but the overall behavior is the same.
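The convergence problem described in the comments above is a fixed-point iteration: each 'go mod tidy' pass maps one set of root requirements to another, and these scripts construct inputs for which naive iteration cycles instead of converging. A self-contained toy (no relation to the real implementation) showing the cycle guard such an iteration needs:

    package main

    import "fmt"

    // tidyOnce stands in for one 'go mod tidy' pass: it maps the current set
    // of roots (reduced here to a single name) to the next one. The table
    // encodes a four-state w -> x -> y -> z -> w cycle like the one that
    // mod_tidy_convergence_loop.txt (below) walks through.
    func tidyOnce(root string) string {
        next := map[string]string{"w": "x", "x": "y", "y": "z", "z": "w"}
        return next[root]
    }

    // tidyToFixedPoint iterates tidyOnce until the state stops changing, with
    // a guard that detects a repeated state instead of looping forever.
    func tidyToFixedPoint(root string) (string, bool) {
        seen := make(map[string]bool)
        for !seen[root] {
            seen[root] = true
            next := tidyOnce(root)
            if next == root {
                return root, true // fixed point: tidy is stable
            }
            root = next
        }
        return root, false // revisited a state: tidy never converges
    }

    func main() {
        state, converged := tidyToFixedPoint("w")
        fmt.Printf("stopped at %q, converged=%v\n", state, converged)
    }

On this input the iteration revisits "w" after four passes and reports converged=false, which is exactly why the tests treat "no package can be added" as the only terminal state.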
+ +cp go.mod.orig go.mod +go mod edit -go=1.17 go.mod +go mod edit -go=1.17 go.mod.tidye +go mod edit -go=1.17 go.mod.postget + +go mod tidy -e +cmp go.mod go.mod.tidye +stderr '^go: found example\.net/y in example\.net/y v0.2.0$' +stderr '^example\.net/m imports\n\texample\.net/x: package example\.net/x provided by example\.net/x at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$' + +go get -d example.net/x@v0.1.0 example.net/y@v0.1.0 +go mod tidy +cmp go.mod go.mod.postget + + +-- go.mod -- +module example.net/m + +go 1.16 + +replace ( + example.net/x v0.1.0 => ./x1 + example.net/x v0.2.0-pre => ./x2-pre + example.net/y v0.1.0 => ./y1 + example.net/y v0.2.0 => ./y2 +) + +require ( + example.net/x v0.1.0 +) +-- go.mod.tidye -- +module example.net/m + +go 1.16 + +replace ( + example.net/x v0.1.0 => ./x1 + example.net/x v0.2.0-pre => ./x2-pre + example.net/y v0.1.0 => ./y1 + example.net/y v0.2.0 => ./y2 +) +-- go.mod.postget -- +module example.net/m + +go 1.16 + +replace ( + example.net/x v0.1.0 => ./x1 + example.net/x v0.2.0-pre => ./x2-pre + example.net/y v0.1.0 => ./y1 + example.net/y v0.2.0 => ./y2 +) + +require ( + example.net/x v0.1.0 + example.net/y v0.1.0 // indirect +) +-- m.go -- +package m + +import _ "example.net/x" + +-- x1/go.mod -- +module example.net/x + +go 1.16 +-- x1/x.go -- +package x +-- x1/x_test.go -- +package x + +import _ "example.net/y" + +-- x2-pre/go.mod -- +module example.net/x + +go 1.16 +-- x2-pre/README.txt -- +There is no package x here. Use example.net/x/subpkg instead. +-- x2-pre/subpkg/subpkg.go -- +package subpkg // import "example.net/x/subpkg" + +-- y1/go.mod -- +module example.net/y + +go 1.16 +-- y1/y.go -- +package y + +-- y2/go.mod -- +module example.net/y + +go 1.16 + +require example.net/x v0.2.0-pre +-- y2/y.go -- +package y + +import _ "example.net/x/subpkg" diff --git a/src/cmd/go/testdata/script/mod_tidy_convergence_loop.txt b/src/cmd/go/testdata/script/mod_tidy_convergence_loop.txt new file mode 100644 index 00000000000..3c4d3244d5d --- /dev/null +++ b/src/cmd/go/testdata/script/mod_tidy_convergence_loop.txt @@ -0,0 +1,329 @@ +# This test demonstrates a simple case in which 'go mod tidy' may resolve a +# missing package, only to remove that package when resolving its dependencies. +# +# If we naively iterate 'go mod tidy' until the dependency graph converges, this +# scenario may fail to converge. + +# The import graph used in this test looks like: +# +# m --- w +# | +# + --- x +# | +# + --- y +# | +# + --- z +# +# The module dependency graph of m initially contains w.1 (and, by extension, +# y.2-pre and z.2-pre). This is an arbitrary point in the cycle of possible +# configurations. +# +# w.1 requires y.2-pre and z.2-pre +# x.1 requires z.2-pre and w.2-pre +# y.1 requires w.2-pre and x.2-pre +# z.1 requires x.2-pre and y.2-pre +# +# At each point, exactly one missing package can be resolved by adding a +# dependency on the .1 release of the module that provides that package. +# However, adding that dependency causes the module providing another package to +# roll over from its .1 release to its .2-pre release, which removes the +# package. Once the package is removed, 'go mod tidy -e' no longer sees the +# module as relevant to the main module, and will happily remove the existing +# dependency on it. +# +# The cycle is of length 4 so that at every step only one package can be +# resolved.
+# reaching a state in which every package is simultaneously over-upgraded — such
+# a state is stable and does not exhibit failure to converge.
+
+cp go.mod go.mod.orig
+
+# 'go mod tidy' without -e should fail without modifying go.mod,
+# because it cannot resolve x, y, and z simultaneously.
+! go mod tidy
+
+cmp go.mod go.mod.orig
+
+stderr '^go: finding module for package example\.net/w$'
+stderr '^go: finding module for package example\.net/x$'
+stderr -count=2 '^go: finding module for package example\.net/y$'
+stderr -count=2 '^go: finding module for package example\.net/z$'
+stderr '^go: found example\.net/x in example\.net/x v0.1.0$'
+
+ # TODO: These error messages should be clearer; they don't indicate why v0.2.0-pre is required.
+stderr '^example\.net/m imports\n\texample\.net/w: package example\.net/w provided by example\.net/w at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$'
+stderr '^example\.net/m imports\n\texample\.net/y: package example\.net/y provided by example\.net/y at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$'
+stderr '^example\.net/m imports\n\texample\.net/z: package example\.net/z provided by example\.net/z at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$'
+
+
+# 'go mod tidy -e' should preserve all of the upgrades to modules that could
+# provide the missing packages but don't. That would at least explain why they
+# are missing, and why no individual module can be upgraded in order to satisfy
+# a missing import.
+#
+# TODO(bcmills): Today, it doesn't preserve those upgrades, and instead advances
+# the state by one through the cycle of semi-tidy states.
+
+go mod tidy -e
+
+cmp go.mod go.mod.tidye1
+
+stderr '^go: finding module for package example\.net/w$'
+stderr '^go: finding module for package example\.net/x$'
+stderr -count=2 '^go: finding module for package example\.net/y$'
+stderr -count=2 '^go: finding module for package example\.net/z$'
+stderr '^go: found example\.net/x in example\.net/x v0.1.0$'
+
+stderr '^example\.net/m imports\n\texample\.net/w: package example\.net/w provided by example\.net/w at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$'
+stderr '^example\.net/m imports\n\texample\.net/y: package example\.net/y provided by example\.net/y at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$'
+stderr '^example\.net/m imports\n\texample\.net/z: package example\.net/z provided by example\.net/z at latest version v0\.1\.0 but not at required version v0\.2\.0-pre$'
+
+
+go mod tidy -e
+cmp go.mod go.mod.tidye2
+
+go mod tidy -e
+cmp go.mod go.mod.tidye3
+
+go mod tidy -e
+cmp go.mod go.mod.orig
+
+
+# If we upgrade away all of the packages simultaneously, the resulting tidy
+# state converges at "no dependencies", because adding all of the
+# packages simultaneously over-upgrades all of the dependencies, and 'go mod
+# tidy' treats "no package can be added" as a terminal state.
+
+go get -d example.net/w@v0.2.0-pre example.net/x@v0.2.0-pre example.net/y@v0.2.0-pre example.net/z@v0.2.0-pre
+go mod tidy -e
+cmp go.mod go.mod.postget
+go mod tidy -e
+cmp go.mod go.mod.postget
+
+
+# The 'tidy' logic for a lazy main module requires more iterations to converge,
+# because it is willing to drop dependencies on non-root modules that do not
+# otherwise provide imported packages.
+# +# On the first iteration, it adds x.1 as a root, which upgrades z and w, +# dropping w.1's requirement on y. w.1 was initially a root, so the upgraded +# w.2-pre is retained as a root. +# +# On the second iteration, it adds y.1 as a root, which upgrades w and x, +# dropping x.1's requirement on z. x.1 was added as a root in the previous step, +# so the upgraded x.2-pre is retained as a root. +# +# On the third iteration, it adds z.1 as a root, which upgrades x and y. +# x and y were already roots (from the previous steps), so their upgraded versions +# are retained (not dropped) and the iteration stops. +# +# At that point, we have z.1 as a root providing package z, +# and w, x, and y have all been upgraded to no longer provide any packages. +# So only z is retained as a new root. +# +# (From the above, we can see that in a lazy module we still cycle through the +# same possible root states, but in a different order from the eager case.) +# +# TODO(bcmills): if we retained the upgrades on w, x, and y (since they are +# lexical prefixes for unresolved packages w, x, and y, respectively), then 'go +# mod tidy -e' itself would become stable and no longer cycle through states. + +cp go.mod.orig go.mod +go mod edit -go=1.17 go.mod +cp go.mod go.mod.117 +go mod edit -go=1.17 go.mod.tidye1 +go mod edit -go=1.17 go.mod.tidye2 +go mod edit -go=1.17 go.mod.tidye3 +go mod edit -go=1.17 go.mod.postget + +go list -m all + +go mod tidy -e +cmp go.mod go.mod.tidye3 + +go mod tidy -e +cmp go.mod go.mod.tidye2 + +go mod tidy -e +cmp go.mod go.mod.tidye1 + +go mod tidy -e +cmp go.mod go.mod.117 + + +# As in the eager case, for the lazy module the fully-upgraded dependency graph +# becomes empty, and the empty graph is stable. + +go get -d example.net/w@v0.2.0-pre example.net/x@v0.2.0-pre example.net/y@v0.2.0-pre example.net/z@v0.2.0-pre +go mod tidy -e +cmp go.mod go.mod.postget +go mod tidy -e +cmp go.mod go.mod.postget + + +-- m.go -- +package m + +import ( + _ "example.net/w" + _ "example.net/x" + _ "example.net/y" + _ "example.net/z" +) + +-- go.mod -- +module example.net/m + +go 1.16 + +replace ( + example.net/w v0.1.0 => ./w1 + example.net/w v0.2.0-pre => ./w2-pre + example.net/x v0.1.0 => ./x1 + example.net/x v0.2.0-pre => ./x2-pre + example.net/y v0.1.0 => ./y1 + example.net/y v0.2.0-pre => ./y2-pre + example.net/z v0.1.0 => ./z1 + example.net/z v0.2.0-pre => ./z2-pre +) + +require example.net/w v0.1.0 +-- go.mod.tidye1 -- +module example.net/m + +go 1.16 + +replace ( + example.net/w v0.1.0 => ./w1 + example.net/w v0.2.0-pre => ./w2-pre + example.net/x v0.1.0 => ./x1 + example.net/x v0.2.0-pre => ./x2-pre + example.net/y v0.1.0 => ./y1 + example.net/y v0.2.0-pre => ./y2-pre + example.net/z v0.1.0 => ./z1 + example.net/z v0.2.0-pre => ./z2-pre +) + +require example.net/x v0.1.0 +-- go.mod.tidye2 -- +module example.net/m + +go 1.16 + +replace ( + example.net/w v0.1.0 => ./w1 + example.net/w v0.2.0-pre => ./w2-pre + example.net/x v0.1.0 => ./x1 + example.net/x v0.2.0-pre => ./x2-pre + example.net/y v0.1.0 => ./y1 + example.net/y v0.2.0-pre => ./y2-pre + example.net/z v0.1.0 => ./z1 + example.net/z v0.2.0-pre => ./z2-pre +) + +require example.net/y v0.1.0 +-- go.mod.tidye3 -- +module example.net/m + +go 1.16 + +replace ( + example.net/w v0.1.0 => ./w1 + example.net/w v0.2.0-pre => ./w2-pre + example.net/x v0.1.0 => ./x1 + example.net/x v0.2.0-pre => ./x2-pre + example.net/y v0.1.0 => ./y1 + example.net/y v0.2.0-pre => ./y2-pre + example.net/z v0.1.0 => ./z1 + example.net/z v0.2.0-pre => ./z2-pre +) 
+ +require example.net/z v0.1.0 +-- go.mod.postget -- +module example.net/m + +go 1.16 + +replace ( + example.net/w v0.1.0 => ./w1 + example.net/w v0.2.0-pre => ./w2-pre + example.net/x v0.1.0 => ./x1 + example.net/x v0.2.0-pre => ./x2-pre + example.net/y v0.1.0 => ./y1 + example.net/y v0.2.0-pre => ./y2-pre + example.net/z v0.1.0 => ./z1 + example.net/z v0.2.0-pre => ./z2-pre +) +-- w1/go.mod -- +module example.net/w + +go 1.16 + +require ( + example.net/y v0.2.0-pre + example.net/z v0.2.0-pre +) +-- w1/w.go -- +package w +-- w2-pre/go.mod -- +module example.net/w + +go 1.16 +-- w2-pre/README.txt -- +Package w has been removed. + +-- x1/go.mod -- +module example.net/x + +go 1.16 + +require ( + example.net/z v0.2.0-pre + example.net/w v0.2.0-pre +) +-- x1/x.go -- +package x +-- x2-pre/go.mod -- +module example.net/x + +go 1.16 +-- x2-pre/README.txt -- +Package x has been removed. + +-- y1/go.mod -- +module example.net/y + +go 1.16 + +require ( + example.net/w v0.2.0-pre + example.net/x v0.2.0-pre +) +-- y1/y.go -- +package y + +-- y2-pre/go.mod -- +module example.net/y + +go 1.16 +-- y2-pre/README.txt -- +Package y has been removed. + +-- z1/go.mod -- +module example.net/z + +go 1.16 + +require ( + example.net/x v0.2.0-pre + example.net/y v0.2.0-pre +) +-- z1/z.go -- +package z + +-- z2-pre/go.mod -- +module example.net/z + +go 1.16 +-- z2-pre/README.txt -- +Package z has been removed. diff --git a/src/cmd/go/testdata/script/mod_tidy_error.txt b/src/cmd/go/testdata/script/mod_tidy_error.txt index b6c24ceaf75..395537b1a71 100644 --- a/src/cmd/go/testdata/script/mod_tidy_error.txt +++ b/src/cmd/go/testdata/script/mod_tidy_error.txt @@ -4,12 +4,12 @@ env GO111MODULE=on # 'go mod tidy' and 'go mod vendor' should not hide loading errors. ! go mod tidy -stderr '^issue27063 imports\n\tnonexist: package nonexist is not in GOROOT \(.*\)' +! stderr 'package nonexist is not in GOROOT' stderr '^issue27063 imports\n\tnonexist.example.com: cannot find module providing package nonexist.example.com' stderr '^issue27063 imports\n\tissue27063/other imports\n\tother.example.com/nonexist: cannot find module providing package other.example.com/nonexist' ! go mod vendor -stderr '^issue27063 imports\n\tnonexist: package nonexist is not in GOROOT \(.*\)' +! stderr 'package nonexist is not in GOROOT' stderr '^issue27063 imports\n\tnonexist.example.com: cannot find module providing package nonexist.example.com' stderr '^issue27063 imports\n\tissue27063/other imports\n\tother.example.com/nonexist: cannot find module providing package other.example.com/nonexist' diff --git a/src/cmd/go/testdata/script/mod_tidy_indirect.txt b/src/cmd/go/testdata/script/mod_tidy_indirect.txt new file mode 100644 index 00000000000..1f092b223bd --- /dev/null +++ b/src/cmd/go/testdata/script/mod_tidy_indirect.txt @@ -0,0 +1,67 @@ +cp go.mod go.mod.orig +go mod tidy +cmp go.mod go.mod.orig + +-- go.mod -- +module example.com/tidy + +go 1.16 + +require ( + example.net/incomplete v0.1.0 + example.net/indirect v0.2.0 // indirect + example.net/toolow v0.1.0 +) + +replace ( + example.net/incomplete v0.1.0 => ./incomplete + example.net/indirect v0.1.0 => ./indirect.1 + example.net/indirect v0.2.0 => ./indirect.2 + example.net/toolow v0.1.0 => ./toolow +) +-- tidy.go -- +package tidy + +import ( + _ "example.net/incomplete" + _ "example.net/toolow" +) + +-- incomplete/go.mod -- +module example.net/incomplete + +go 1.16 + +// This module omits a needed requirement on example.net/indirect. 
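+//
+// (Illustrative note, not load-bearing for the test: the main module's go.mod
+// carries 'example.net/indirect v0.2.0 // indirect' on this module's behalf,
+// which is why 'go mod tidy' above must leave go.mod unchanged rather than
+// dropping that seemingly unused requirement.)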
+-- incomplete/incomplete.go -- +package incomplete + +import _ "example.net/indirect/newpkg" + +-- toolow/go.mod -- +module example.net/toolow + +go 1.16 + +require example.net/indirect v0.1.0 +-- toolow/toolow.go -- +package toolow + +import _ "example.net/indirect/oldpkg" + +-- indirect.1/go.mod -- +module example.net/indirect + +go 1.16 +-- indirect.1/oldpkg/oldpkg.go -- +package oldpkg + + +-- indirect.2/go.mod -- +module example.net/indirect + +go 1.16 +-- indirect.2/oldpkg/oldpkg.go -- +package oldpkg +-- indirect.2/newpkg/newpkg.go -- +package newpkg diff --git a/src/cmd/go/testdata/script/mod_tidy_newroot.txt b/src/cmd/go/testdata/script/mod_tidy_newroot.txt new file mode 100644 index 00000000000..3abd5ef08a3 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_tidy_newroot.txt @@ -0,0 +1,82 @@ +# https://golang.org/issue/45952: 'go mod tidy' in an eager module failed due +# to an erroneous check on root completeness. +# +# Per the issue report: +# > It may have to do with: +# > +# > package A imports package B in go.mod, which imports package C in its own go.mod +# > package A drops direct dependency on package B … +# +# We infer from that that package C is still needed by some other indirect +# dependency, and must be at a higher version than what is required by that +# dependency (or else no new root would be needed). An additional package D +# in its own module satisfies that condition, reproducing the bug. + +go mod tidy +cmp go.mod go.mod.tidy + +-- go.mod -- +module example.net/a + +go 1.16 + +require ( + example.net/b v0.1.0 + example.net/d v0.1.0 +) + +replace ( + example.net/b v0.1.0 => ./b + example.net/c v0.1.0 => ./c + example.net/c v0.2.0 => ./c + example.net/d v0.1.0 => ./d +) +-- go.mod.tidy -- +module example.net/a + +go 1.16 + +require ( + example.net/c v0.2.0 // indirect + example.net/d v0.1.0 +) + +replace ( + example.net/b v0.1.0 => ./b + example.net/c v0.1.0 => ./c + example.net/c v0.2.0 => ./c + example.net/d v0.1.0 => ./d +) +-- a.go -- +package a + +import _ "example.net/d" + +-- b/go.mod -- +module example.net/b + +go 1.16 + +require example.net/c v0.2.0 +-- b/b.go -- +package b + +import _ "example.net/c" + +-- c/go.mod -- +module example.net/c + +go 1.16 +-- c/c.go -- +package c + +-- d/go.mod -- +module example.net/d + +go 1.16 + +require example.net/c v0.1.0 +-- d/d.go -- +package d + +import _ "example.net/c" diff --git a/src/cmd/go/testdata/script/mod_tidy_replace.txt b/src/cmd/go/testdata/script/mod_tidy_replace.txt index dd994388910..297f6a6a45c 100644 --- a/src/cmd/go/testdata/script/mod_tidy_replace.txt +++ b/src/cmd/go/testdata/script/mod_tidy_replace.txt @@ -136,3 +136,10 @@ require ( ) replace not-rsc.io/quote/v3 => rsc.io/quote/v3 v3.0.0 +-- multiple-paths/use.go -- +package quoter + +import ( + _ "not-rsc.io/quote/v3" + _ "rsc.io/quote/v3" +) diff --git a/src/cmd/go/testdata/script/mod_tidy_symlink_issue35941.txt b/src/cmd/go/testdata/script/mod_tidy_symlink_issue35941.txt new file mode 100644 index 00000000000..d4658c65d40 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_tidy_symlink_issue35941.txt @@ -0,0 +1,36 @@ +env GO111MODULE=on +[!symlink] skip + +cd m +symlink symlink -> ../outside + +cp go.mod go.mod.orig + +# Issue 35941: suppress symlink warnings when running 'go mod tidy'. +# 'go mod tidy' should not scan packages in symlinked subdirectories. +go mod tidy +! stderr 'warning: ignoring symlink' +cmp go.mod go.mod.orig + +! 
go build ./symlink +stderr '^symlink[\\/]symlink.go:3:8: module example.net/unresolved provides package example.net/unresolved and is replaced but not required; to add it:\n\tgo get example.net/unresolved@v0.1.0$' + +-- m/go.mod -- +module example.net/m + +go 1.16 + +replace example.net/unresolved v0.1.0 => ../unresolved +-- m/a.go -- +package a +-- outside/symlink.go -- +package symlink + +import _ "example.net/unresolved" +-- unresolved/go.mod -- +module example.net/unresolved + +go 1.16 +-- unresolved/unresolved.go -- +// Package unresolved exists, but 'go mod tidy' won't add it. +package unresolved diff --git a/src/cmd/go/testdata/script/mod_tidy_version.txt b/src/cmd/go/testdata/script/mod_tidy_version.txt new file mode 100644 index 00000000000..5441d9cc06f --- /dev/null +++ b/src/cmd/go/testdata/script/mod_tidy_version.txt @@ -0,0 +1,248 @@ +# https://golang.org/issue/45094: 'go mod tidy' now accepts a '-go' flag +# to change the language version in use. +# +# The package import graph used in this test looks like: +# +# m --- a --- b +# | +# b_test --- c +# | +# c_test --- d +# +# The module diagram looks like: +# +# m --- a --- b +# | +# + --- c +# | +# + --- d +# +# Module b omits its dependency on c, and module c omits its dependency on d. +# +# In go 1.15, the tidy main module must require a (because it is direct), +# c (because it is a missing test dependency of an imported package), +# and d (because it is a missing transitive test dependency). +# +# In go 1.16, the tidy main module can omit d because it is no longer +# included in "all". +# +# In go 1.17, the main module must explicitly require b +# (because it is transitively imported by the main module). + + +cp go.mod go.mod.orig + +# An invalid argument should be rejected. + +! go mod tidy -go=bananas +stderr '^go mod: invalid -go option "bananas"; expecting something like "-go 1.17"$' +cmp go.mod go.mod.orig + + +go mod tidy -go=1.15 +cmp go.mod go.mod.115 + +go mod tidy +cmp go.mod go.mod.115 + + +go mod tidy -go=1.16 +cmp go.mod go.mod.116 + +go mod tidy +cmp go.mod go.mod.116 + + +go mod tidy -go=1.17 +cmp go.mod go.mod.117 + +go mod tidy +cmp go.mod go.mod.117 + + +# If we downgrade back to 1.15, we should re-resolve d to v0.2.0 instead +# of the original v0.1.0 (because the original requirement is lost). + +go mod tidy -go=1.15 +cmp go.mod go.mod.115-2 + + +# -go= (with an empty argument) maintains the existing version or adds the +# default version (just like omitting the flag). 
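+#
+# (Concretely, as exercised below: on a go.mod that already says 'go 1.15',
+# "go mod tidy -go=''" keeps 1.15, while on the original go.mod, which has no
+# 'go' directive at all, it adds the toolchain's default version; that is why
+# the final comparison uses cmpenv against a go.mod.latest containing a
+# $goversion placeholder.)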
+ +go mod tidy -go='' +cmp go.mod go.mod.115-2 + +cp go.mod.orig go.mod +go mod tidy -go='' +cmpenv go.mod go.mod.latest + + + +-- go.mod -- +module example.com/m + +require ( + example.net/a v0.1.0 + example.net/c v0.1.0 // indirect + example.net/d v0.1.0 // indirect +) + +replace ( + example.net/a v0.1.0 => ./a + example.net/a v0.2.0 => ./a + example.net/b v0.1.0 => ./b + example.net/b v0.2.0 => ./b + example.net/c v0.1.0 => ./c + example.net/c v0.2.0 => ./c + example.net/d v0.1.0 => ./d + example.net/d v0.2.0 => ./d +) +-- m.go -- +package m + +import _ "example.net/a" + +-- go.mod.115 -- +module example.com/m + +go 1.15 + +require ( + example.net/a v0.1.0 + example.net/c v0.1.0 // indirect + example.net/d v0.1.0 // indirect +) + +replace ( + example.net/a v0.1.0 => ./a + example.net/a v0.2.0 => ./a + example.net/b v0.1.0 => ./b + example.net/b v0.2.0 => ./b + example.net/c v0.1.0 => ./c + example.net/c v0.2.0 => ./c + example.net/d v0.1.0 => ./d + example.net/d v0.2.0 => ./d +) +-- go.mod.115-2 -- +module example.com/m + +go 1.15 + +require ( + example.net/a v0.1.0 + example.net/c v0.1.0 // indirect + example.net/d v0.2.0 // indirect +) + +replace ( + example.net/a v0.1.0 => ./a + example.net/a v0.2.0 => ./a + example.net/b v0.1.0 => ./b + example.net/b v0.2.0 => ./b + example.net/c v0.1.0 => ./c + example.net/c v0.2.0 => ./c + example.net/d v0.1.0 => ./d + example.net/d v0.2.0 => ./d +) +-- go.mod.116 -- +module example.com/m + +go 1.16 + +require ( + example.net/a v0.1.0 + example.net/c v0.1.0 // indirect +) + +replace ( + example.net/a v0.1.0 => ./a + example.net/a v0.2.0 => ./a + example.net/b v0.1.0 => ./b + example.net/b v0.2.0 => ./b + example.net/c v0.1.0 => ./c + example.net/c v0.2.0 => ./c + example.net/d v0.1.0 => ./d + example.net/d v0.2.0 => ./d +) +-- go.mod.117 -- +module example.com/m + +go 1.17 + +require ( + example.net/a v0.1.0 + example.net/b v0.1.0 // indirect + example.net/c v0.1.0 // indirect +) + +replace ( + example.net/a v0.1.0 => ./a + example.net/a v0.2.0 => ./a + example.net/b v0.1.0 => ./b + example.net/b v0.2.0 => ./b + example.net/c v0.1.0 => ./c + example.net/c v0.2.0 => ./c + example.net/d v0.1.0 => ./d + example.net/d v0.2.0 => ./d +) +-- go.mod.latest -- +module example.com/m + +go $goversion + +require ( + example.net/a v0.1.0 + example.net/b v0.1.0 // indirect + example.net/c v0.1.0 // indirect +) + +replace ( + example.net/a v0.1.0 => ./a + example.net/a v0.2.0 => ./a + example.net/b v0.1.0 => ./b + example.net/b v0.2.0 => ./b + example.net/c v0.1.0 => ./c + example.net/c v0.2.0 => ./c + example.net/d v0.1.0 => ./d + example.net/d v0.2.0 => ./d +) +-- a/go.mod -- +module example.net/a + +go 1.15 + +require example.net/b v0.1.0 +-- a/a.go -- +package a + +import _ "example.net/b" + +-- b/go.mod -- +module example.net/b + +go 1.15 +-- b/b.go -- +package b +-- b/b_test.go -- +package b_test + +import _ "example.net/c" + +-- c/go.mod -- +module example.net/c + +go 1.15 +-- c/c.go -- +package c +-- c/c_test.go -- +package c_test + +import _ "example.net/d" + +-- d/go.mod -- +module example.net/d + +go 1.15 +-- d/d.go -- +package d diff --git a/src/cmd/go/testdata/script/mod_vendor_gomod.txt b/src/cmd/go/testdata/script/mod_vendor_gomod.txt new file mode 100644 index 00000000000..3f6ea3561a4 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_vendor_gomod.txt @@ -0,0 +1,38 @@ +# https://golang.org/issue/42970: As of Go 1.17, go.mod and go.sum files should +# be stripped from vendored dependencies. + +go mod vendor +cd vendor/example.net/x +go list all +! 
stdout '^example.net/m' +stdout '^example.net/x$' +exists ./go.sum + +cd ../../.. +go mod edit -go=1.17 +go mod vendor +cd vendor/example.net/x +go list all +stdout '^example.net/m$' +stdout '^example.net/x$' +! exists ./go.sum + +-- go.mod -- +module example.net/m + +go 1.16 + +require example.net/x v0.1.0 + +replace example.net/x v0.1.0 => ./x +-- m.go -- +package m + +import _ "example.net/x" +-- x/go.mod -- +module example.net/x + +go 1.16 +-- x/go.sum -- +-- x/x.go -- +package x diff --git a/src/cmd/go/testdata/script/mod_vendor_goversion.txt b/src/cmd/go/testdata/script/mod_vendor_goversion.txt new file mode 100644 index 00000000000..aa4cb41171a --- /dev/null +++ b/src/cmd/go/testdata/script/mod_vendor_goversion.txt @@ -0,0 +1,102 @@ +# https://golang.org/issue/36876: As of Go 1.17, vendor/modules.txt should +# indicate the language version used by each dependency. + +[short] skip + + +# Control case: without a vendor directory, need117 builds and bad114 doesn't. + +go build example.net/need117 +! go build example.net/bad114 +stderr '^bad114[/\\]bad114.go:15:2: duplicate method Y$' + + +# With a vendor/modules.txt lacking language versions, the world is topsy-turvy, +# because we have to guess a uniform version for everything. +# +# We always guess Go 1.16, because that was the last version for which +# 'go mod vendor' failed to record dependency versions, and it has most of +# the language features added since modules were introduced in Go 1.11. +# +# Even so, modules that declare 'go 1.17' and use 1.17 features spuriously fail +# to build, and modules that declare an older version and use features from a +# newer one spuriously build (instead of failing as they ought to). + +go mod vendor + +! grep 1.17 vendor/modules.txt +! go build example.net/need117 +stderr '^vendor[/\\]example\.net[/\\]need117[/\\]need117.go:5:18: .*\n\tconversion of slices to array pointers only supported as of -lang=go1\.17' + +! grep 1.13 vendor/modules.txt +go build example.net/bad114 + + +# Upgrading the main module to 1.17 adds version annotations. +# Then everything is once again consistent with the non-vendored world. + +go mod edit -go=1.17 +go mod vendor + +grep '^## explicit; go 1.17$' vendor/modules.txt +go build example.net/need117 + +grep '^## explicit; go 1.13$' vendor/modules.txt +! go build example.net/bad114 +stderr '^vendor[/\\]example\.net[/\\]bad114[/\\]bad114.go:15:2: duplicate method Y$' + +-- go.mod -- +module example.net/m + +go 1.16 + +require ( + example.net/bad114 v0.1.0 + example.net/need117 v0.1.0 +) + +replace ( + example.net/bad114 v0.1.0 => ./bad114 + example.net/need117 v0.1.0 => ./need117 +) +-- m.go -- +package m + +import _ "example.net/bad114" +import _ "example.net/need117" + +-- bad114/go.mod -- +// Module bad114 requires Go 1.14 or higher, but declares Go 1.13. +module example.net/bad114 + +go 1.13 +-- bad114/bad114.go -- +package bad114 + +type XY interface { + X() + Y() +} + +type YZ interface { + Y() + Z() +} + +type XYZ interface { + XY + YZ +} + +-- need117/go.mod -- +// Module need117 requires Go 1.17 or higher. 
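+// (Its source relies on the Go 1.17 conversion from a slice to an array
+// pointer, as in '_ = (*[4]byte)(s)', so compiling it as go1.16 or earlier
+// must fail.)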
+module example.net/need117 + +go 1.17 +-- need117/need117.go -- +package need117 + +func init() { + s := make([]byte, 4) + _ = (*[4]byte)(s) +} diff --git a/src/cmd/go/testdata/script/mod_verify.txt b/src/cmd/go/testdata/script/mod_verify.txt index 43812d069f6..b5106659a9a 100644 --- a/src/cmd/go/testdata/script/mod_verify.txt +++ b/src/cmd/go/testdata/script/mod_verify.txt @@ -48,10 +48,13 @@ go mod tidy grep '^rsc.io/quote v1.1.0/go.mod ' go.sum grep '^rsc.io/quote v1.1.0 ' go.sum -# sync should ignore missing ziphash; verify should not +# verify should fail on a missing ziphash. tidy should restore it. rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.1.0.ziphash -go mod tidy ! go mod verify +stderr '^rsc.io/quote v1.1.0: missing ziphash: open '$GOPATH'[/\\]pkg[/\\]mod[/\\]cache[/\\]download[/\\]rsc.io[/\\]quote[/\\]@v[/\\]v1.1.0.ziphash' +go mod tidy +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.1.0.ziphash +go mod verify # Packages below module root should not be mentioned in go.sum. rm go.sum diff --git a/src/cmd/go/testdata/script/test_cache_inputs.txt b/src/cmd/go/testdata/script/test_cache_inputs.txt index 50486e19090..d694a309947 100644 --- a/src/cmd/go/testdata/script/test_cache_inputs.txt +++ b/src/cmd/go/testdata/script/test_cache_inputs.txt @@ -99,6 +99,15 @@ rm $WORK/external.txt go test testcache -run=ExternalFile stdout '\(cached\)' +# The -benchtime flag without -bench should not affect caching. +go test testcache -run=Benchtime -benchtime=1x +go test testcache -run=Benchtime -benchtime=1x +stdout '\(cached\)' + +go test testcache -run=Benchtime -bench=Benchtime -benchtime=1x +go test testcache -run=Benchtime -bench=Benchtime -benchtime=1x +! stdout '\(cached\)' + # Executables within GOROOT and GOPATH should affect caching, # even if the test does not stat them explicitly. @@ -228,6 +237,10 @@ func TestExternalFile(t *testing.T) { func TestOSArgs(t *testing.T) { t.Log(os.Args) } + +func TestBenchtime(t *testing.T) { +} + -- mkold.go -- package main diff --git a/src/cmd/go/testdata/script/test_chatty_parallel_fail.txt b/src/cmd/go/testdata/script/test_chatty_parallel_fail.txt index 3f7360b6592..f8faa93663d 100644 --- a/src/cmd/go/testdata/script/test_chatty_parallel_fail.txt +++ b/src/cmd/go/testdata/script/test_chatty_parallel_fail.txt @@ -14,7 +14,7 @@ stdout -count=1 '{"Time":"[0-9TZ:.+-]{20,40}","Action":"output","Package":"comma stdout -count=1 '{"Time":"[0-9TZ:.+-]{20,40}","Action":"output","Package":"command-line-arguments","Test":"TestChattyParallel/sub-2","Output":"=== CONT TestChattyParallel/sub-2\\n"}\n{"Time":"[0-9TZ:.+-]{20,40}","Action":"output","Package":"command-line-arguments","Test":"TestChattyParallel/sub-2","Output":" chatty_parallel_test.go:38: error from sub-2\\n"}' -- chatty_parallel_test.go -- -package chatty_paralell_test +package chatty_parallel_test import ( "testing" @@ -22,7 +22,7 @@ import ( "flag" ) -// This test ensures the the order of CONT lines in parallel chatty tests. +// This test ensures the order of CONT lines in parallel chatty tests. 
func TestChattyParallel(t *testing.T) { t.Parallel() diff --git a/src/cmd/go/testdata/script/test_chatty_parallel_success.txt b/src/cmd/go/testdata/script/test_chatty_parallel_success.txt index 4a86d74f196..63034fa3b5f 100644 --- a/src/cmd/go/testdata/script/test_chatty_parallel_success.txt +++ b/src/cmd/go/testdata/script/test_chatty_parallel_success.txt @@ -13,7 +13,7 @@ stdout -count=2 '{"Time":"[0-9TZ:.+-]{20,40}","Action":"output","Package":"comma stdout -count=2 '{"Time":"[0-9TZ:.+-]{20,40}","Action":"output","Package":"command-line-arguments","Test":"TestChattyParallel/sub-2","Output":"=== CONT TestChattyParallel/sub-2\\n"}\n{"Time":"[0-9TZ:.+-]{20,40}","Action":"output","Package":"command-line-arguments","Test":"TestChattyParallel/sub-2","Output":" chatty_parallel_test.go:32: this is sub-2\\n"}' -- chatty_parallel_test.go -- -package chatty_paralell_test +package chatty_parallel_test import ( "testing" @@ -21,7 +21,7 @@ import ( "flag" ) -// This test ensures the the order of CONT lines in parallel chatty tests. +// This test ensures the order of CONT lines in parallel chatty tests. func TestChattyParallel(t *testing.T) { t.Parallel() diff --git a/src/cmd/go/testdata/script/test_chatty_parallel_success_sleepy.txt b/src/cmd/go/testdata/script/test_chatty_parallel_success_sleepy.txt index 5952a87beab..e651a7ed243 100644 --- a/src/cmd/go/testdata/script/test_chatty_parallel_success_sleepy.txt +++ b/src/cmd/go/testdata/script/test_chatty_parallel_success_sleepy.txt @@ -5,7 +5,7 @@ go test -parallel 3 chatty_parallel_test.go -v stdout '--- PASS: TestFast \([0-9.]{4}s\)\n=== CONT TestSlow\n chatty_parallel_test.go:31: this is the second TestSlow log\n--- PASS: TestSlow \([0-9.]{4}s\)' -- chatty_parallel_test.go -- -package chatty_paralell_test +package chatty_parallel_test import ( "testing" diff --git a/src/cmd/go/testdata/script/test_finished_subtest_goroutines.txt b/src/cmd/go/testdata/script/test_finished_subtest_goroutines.txt new file mode 100644 index 00000000000..8db821eb77f --- /dev/null +++ b/src/cmd/go/testdata/script/test_finished_subtest_goroutines.txt @@ -0,0 +1,52 @@ +# Regression test for https://golang.org/issue/45127: +# Goroutines for completed parallel subtests should exit immediately, +# not block until earlier subtests have finished. + +[short] skip + +! go test . +stdout 'panic: slow failure' +! stdout '\[chan send' + +-- go.mod -- +module golang.org/issue45127 + +go 1.16 +-- issue45127_test.go -- +package main + +import ( + "fmt" + "runtime" + "runtime/debug" + "sync" + "testing" +) + +func TestTestingGoroutineLeak(t *testing.T) { + debug.SetTraceback("all") + + var wg sync.WaitGroup + const nFast = 10 + + t.Run("slow", func(t *testing.T) { + t.Parallel() + wg.Wait() + for i := 0; i < nFast; i++ { + // If the subtest goroutines are going to park on the channel + // send, allow them to park now. If they're not going to park, + // make sure they have had a chance to run to completion so + // that they aren't spuriously parked when we panic. + runtime.Gosched() + } + panic("slow failure") + }) + + wg.Add(nFast) + for i := 0; i < nFast; i++ { + t.Run(fmt.Sprintf("leaky%d", i), func(t *testing.T) { + t.Parallel() + wg.Done() + }) + } +} diff --git a/src/cmd/go/testdata/script/test_overlay.txt b/src/cmd/go/testdata/script/test_overlay.txt new file mode 100644 index 00000000000..b6bdc116e62 --- /dev/null +++ b/src/cmd/go/testdata/script/test_overlay.txt @@ -0,0 +1,24 @@ +[short] skip + +cd $WORK/gopath/src/foo +go test -list=. -overlay=overlay.json . 
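+# (What the overlay exercises: overlay.json maps foo_test.go to
+# ../tmp/bar_test.go, so -list should report TestBar from the replacement file
+# rather than TestFoo from the file on disk.)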
+
+stdout 'TestBar'
+
+-- go.mod --
+module test.pkg
+-- foo/foo_test.go --
+package foo
+
+import "testing"
+
+func TestFoo(t *testing.T) { }
+-- tmp/bar_test.go --
+package foo
+
+import "testing"
+
+func TestBar(t *testing.T) {
+	t.Fatal("dummy failure")
+}
+-- foo/overlay.json --
+{"Replace": {"foo_test.go": "../tmp/bar_test.go"}}
diff --git a/src/cmd/go/testdata/script/test_shuffle.txt b/src/cmd/go/testdata/script/test_shuffle.txt
new file mode 100644
index 00000000000..3a50605c344
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_shuffle.txt
@@ -0,0 +1,148 @@
+# Shuffle order of tests and benchmarks
+
+# Run tests
+go test -v foo_test.go
+! stdout '-test.shuffle '
+stdout '(?s)TestOne(.*)TestTwo(.*)TestThree'
+
+go test -v -shuffle=off foo_test.go
+! stdout '-test.shuffle '
+stdout '(?s)TestOne(.*)TestTwo(.*)TestThree'
+
+go test -v -shuffle=42 foo_test.go
+stdout '^-test.shuffle 42'
+stdout '(?s)TestThree(.*)TestOne(.*)TestTwo'
+
+go test -v -shuffle=43 foo_test.go
+stdout '^-test.shuffle 43'
+stdout '(?s)TestThree(.*)TestTwo(.*)TestOne'
+
+go test -v -shuffle=44 foo_test.go
+stdout '^-test.shuffle 44'
+stdout '(?s)TestOne(.*)TestThree(.*)TestTwo'
+
+go test -v -shuffle=0 foo_test.go
+stdout '^-test.shuffle 0'
+stdout '(?s)TestTwo(.*)TestOne(.*)TestThree'
+
+go test -v -shuffle -1 foo_test.go
+stdout '^-test.shuffle -1'
+stdout '(?s)TestThree(.*)TestOne(.*)TestTwo'
+
+go test -v -shuffle=on foo_test.go
+stdout '^-test.shuffle '
+stdout '(?s)=== RUN TestOne(.*)--- PASS: TestOne'
+stdout '(?s)=== RUN TestTwo(.*)--- PASS: TestTwo'
+stdout '(?s)=== RUN TestThree(.*)--- PASS: TestThree'
+
+
+# Run tests and benchmarks
+go test -v -bench=. foo_test.go
+! stdout '-test.shuffle '
+stdout '(?s)TestOne(.*)TestTwo(.*)TestThree(.*)BenchmarkOne(.*)BenchmarkTwo(.*)BenchmarkThree'
+
+go test -v -bench=. -shuffle=off foo_test.go
+! stdout '-test.shuffle '
+stdout '(?s)TestOne(.*)TestTwo(.*)TestThree(.*)BenchmarkOne(.*)BenchmarkTwo(.*)BenchmarkThree'
+
+go test -v -bench=. -shuffle=42 foo_test.go
+stdout '^-test.shuffle 42'
+stdout '(?s)TestThree(.*)TestOne(.*)TestTwo(.*)BenchmarkThree(.*)BenchmarkOne(.*)BenchmarkTwo'
+
+go test -v -bench=. -shuffle=43 foo_test.go
+stdout '^-test.shuffle 43'
+stdout '(?s)TestThree(.*)TestTwo(.*)TestOne(.*)BenchmarkThree(.*)BenchmarkOne(.*)BenchmarkTwo'
+
+go test -v -bench=. -shuffle=44 foo_test.go
+stdout '^-test.shuffle 44'
+stdout '(?s)TestOne(.*)TestThree(.*)TestTwo(.*)BenchmarkTwo(.*)BenchmarkOne(.*)BenchmarkThree'
+
+go test -v -bench=. -shuffle=0 foo_test.go
+stdout '^-test.shuffle 0'
+stdout '(?s)TestTwo(.*)TestOne(.*)TestThree(.*)BenchmarkThree(.*)BenchmarkOne(.*)BenchmarkTwo'
+
+go test -v -bench=. -shuffle -1 foo_test.go
+stdout '^-test.shuffle -1'
+stdout '(?s)TestThree(.*)TestOne(.*)TestTwo(.*)BenchmarkOne(.*)BenchmarkThree(.*)BenchmarkTwo'
+
+go test -v -bench=. -shuffle=on foo_test.go
+stdout '^-test.shuffle '
+stdout '(?s)=== RUN TestOne(.*)--- PASS: TestOne'
+stdout '(?s)=== RUN TestTwo(.*)--- PASS: TestTwo'
+stdout '(?s)=== RUN TestThree(.*)--- PASS: TestThree'
+stdout -count=2 'BenchmarkOne'
+stdout -count=2 'BenchmarkTwo'
+stdout -count=2 'BenchmarkThree'
+
+
+# When running go test -count=N, each of the N distinct runs should maintain the same
+# shuffled order of these tests.
+go test -v -shuffle=43 -count=4 foo_test.go
+stdout '^-test.shuffle 43'
+stdout '(?s)TestThree(.*)TestTwo(.*)TestOne(.*)TestThree(.*)TestTwo(.*)TestOne(.*)TestThree(.*)TestTwo(.*)TestOne(.*)TestThree(.*)TestTwo(.*)TestOne'
+
+go test -v -bench=. 
-shuffle=44 -count=2 foo_test.go +stdout '^-test.shuffle 44' +stdout '(?s)TestOne(.*)TestThree(.*)TestTwo(.*)TestOne(.*)TestThree(.*)TestTwo(.*)BenchmarkTwo(.*)BenchmarkOne(.*)BenchmarkThree(.*)' + + +# The feature should work with test binaries as well +go test -c +exec ./m.test -test.shuffle=off +! stdout '^-test.shuffle ' + +exec ./m.test -test.shuffle=on +stdout '^-test.shuffle ' + +exec ./m.test -test.v -test.bench=. -test.shuffle=0 foo_test.go +stdout '^-test.shuffle 0' +stdout '(?s)TestTwo(.*)TestOne(.*)TestThree(.*)BenchmarkThree(.*)BenchmarkOne(.*)BenchmarkTwo' + +exec ./m.test -test.v -test.bench=. -test.shuffle=123 foo_test.go +stdout '^-test.shuffle 123' +stdout '(?s)TestThree(.*)TestOne(.*)TestTwo(.*)BenchmarkThree(.*)BenchmarkTwo(.*)BenchmarkOne' + +exec ./m.test -test.v -test.bench=. -test.shuffle=-1 foo_test.go +stdout '^-test.shuffle -1' +stdout '(?s)TestThree(.*)TestOne(.*)TestTwo(.*)BenchmarkOne(.*)BenchmarkThree(.*)BenchmarkTwo' + +exec ./m.test -test.v -test.bench=. -test.shuffle=44 -test.count=2 foo_test.go +stdout '^-test.shuffle 44' +stdout '(?s)TestOne(.*)TestThree(.*)TestTwo(.*)TestOne(.*)TestThree(.*)TestTwo(.*)BenchmarkTwo(.*)BenchmarkOne(.*)BenchmarkThree(.*)' + + +# Negative testcases for invalid input +! go test -shuffle -count=2 +stderr 'invalid value "-count=2" for flag -shuffle: -shuffle argument must be "on", "off", or an int64: strconv.ParseInt: parsing "-count=2": invalid syntax' + +! go test -shuffle= +stderr '(?s)invalid value "" for flag -shuffle: -shuffle argument must be "on", "off", or an int64: strconv.ParseInt: parsing "": invalid syntax' + +! go test -shuffle=' ' +stderr '(?s)invalid value " " for flag -shuffle: -shuffle argument must be "on", "off", or an int64: strconv.ParseInt: parsing " ": invalid syntax' + +! go test -shuffle=true +stderr 'invalid value "true" for flag -shuffle: -shuffle argument must be "on", "off", or an int64: strconv.ParseInt: parsing "true": invalid syntax' + +! go test -shuffle='abc' +stderr 'invalid value "abc" for flag -shuffle: -shuffle argument must be "on", "off", or an int64: strconv.ParseInt: parsing "abc": invalid syntax' + +-- go.mod -- +module m + +go 1.16 +-- foo_test.go -- +package foo + +import "testing" + +func TestOne(t *testing.T) {} +func TestTwo(t *testing.T) {} +func TestThree(t *testing.T) {} + +func BenchmarkOne(b *testing.B) {} +func BenchmarkTwo(b *testing.B) {} +func BenchmarkThree(b *testing.B) {} + +-- foo.go -- +package foo diff --git a/src/cmd/go/testdata/script/test_trimpath.txt b/src/cmd/go/testdata/script/test_trimpath.txt new file mode 100644 index 00000000000..065f9ce4d17 --- /dev/null +++ b/src/cmd/go/testdata/script/test_trimpath.txt @@ -0,0 +1,51 @@ +[short] skip + +go test -trimpath -v . +! 
stdout '[/\\]pkg_test[/\\]' +stdout -count=3 '[/\\]pkg[/\\]' + +-- go.mod -- +module example.com/pkg + +go 1.17 + +-- pkg.go -- +package pkg + +import "runtime" + +func PrintFile() { + _, file, _, _ := runtime.Caller(0) + println(file) +} + +-- pkg_test.go -- +package pkg + +import "runtime" + +func PrintFileForTest() { + _, file, _, _ := runtime.Caller(0) + println(file) +} + +-- pkg_x_test.go -- +package pkg_test + +import ( + "runtime" + "testing" + + "example.com/pkg" +) + +func TestMain(m *testing.M) { + pkg.PrintFile() + pkg.PrintFileForTest() + PrintFileInXTest() +} + +func PrintFileInXTest() { + _, file, _, _ := runtime.Caller(0) + println(file) +} diff --git a/src/cmd/go/testdata/script/test_trimpath_main.txt b/src/cmd/go/testdata/script/test_trimpath_main.txt new file mode 100644 index 00000000000..c07621245fb --- /dev/null +++ b/src/cmd/go/testdata/script/test_trimpath_main.txt @@ -0,0 +1,38 @@ +[short] skip + +go test -trimpath -v . +! stdout '[/\\]pkg_test[/\\]' +stdout -count=2 '[/\\]pkg[/\\]' + +-- go.mod -- +module example.com/pkg + +go 1.17 + +-- main.go -- +package main + +import "runtime" + +func PrintFile() { + _, file, _, _ := runtime.Caller(0) + println(file) +} + +-- main_test.go -- +package main + +import ( + "runtime" + "testing" +) + +func PrintFileForTest() { + _, file, _, _ := runtime.Caller(0) + println(file) +} + +func TestMain(m *testing.M) { + PrintFile() + PrintFileForTest() +} diff --git a/src/cmd/go/testdata/script/test_trimpath_test_suffix.txt b/src/cmd/go/testdata/script/test_trimpath_test_suffix.txt new file mode 100644 index 00000000000..6cbad83bc78 --- /dev/null +++ b/src/cmd/go/testdata/script/test_trimpath_test_suffix.txt @@ -0,0 +1,40 @@ +[short] skip + +go test -trimpath -v . +! stdout '[/\\]pkg_test_test[/\\]' +stdout -count=2 '[/\\]pkg_test[/\\]' + +-- go.mod -- +module example.com/pkg_test + +go 1.17 + +-- pkg.go -- +package pkg_test + +import "runtime" + +func PrintFile() { + _, file, _, _ := runtime.Caller(0) + println(file) +} + +-- pkg_x_test.go -- +package pkg_test_test + +import ( + "runtime" + "testing" + + "example.com/pkg_test" +) + +func PrintFileForTest() { + _, file, _, _ := runtime.Caller(0) + println(file) +} + +func TestMain(m *testing.M) { + pkg_test.PrintFile() + PrintFileForTest() +} diff --git a/src/cmd/go/testdata/script/test_write_profiles_on_timeout.txt b/src/cmd/go/testdata/script/test_write_profiles_on_timeout.txt index 08e67a429e0..0db183f8f04 100644 --- a/src/cmd/go/testdata/script/test_write_profiles_on_timeout.txt +++ b/src/cmd/go/testdata/script/test_write_profiles_on_timeout.txt @@ -3,6 +3,7 @@ [short] skip ! go test -cpuprofile cpu.pprof -memprofile mem.pprof -timeout 1ms +stdout '^panic: test timed out' grep . cpu.pprof grep . mem.pprof @@ -12,6 +13,14 @@ module profiling go 1.16 -- timeout_test.go -- package timeouttest_test -import "testing" -import "time" -func TestSleep(t *testing.T) { time.Sleep(time.Second) } + +import ( + "testing" + "time" +) + +func TestSleep(t *testing.T) { + for { + time.Sleep(1 * time.Second) + } +} diff --git a/src/cmd/go/testdata/script/toolexec.txt b/src/cmd/go/testdata/script/toolexec.txt index 526234196b5..4f26da6d26b 100644 --- a/src/cmd/go/testdata/script/toolexec.txt +++ b/src/cmd/go/testdata/script/toolexec.txt @@ -11,12 +11,37 @@ go build ./cmd/mytool # Finally, note that asm and cgo are run twice. 
go build -toolexec=$PWD/mytool -[amd64] stderr -count=2 '^asm'${GOEXE}' TOOLEXEC_IMPORTPATH=test/main/withasm$' -stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH=test/main/withasm$' -[cgo] stderr -count=2 '^cgo'${GOEXE}' TOOLEXEC_IMPORTPATH=test/main/withcgo$' -[cgo] stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH=test/main/withcgo$' -stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH=test/main$' -stderr -count=1 '^link'${GOEXE}' TOOLEXEC_IMPORTPATH=test/main$' +[amd64] stderr -count=2 '^asm'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main/withasm"$' +stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main/withasm"$' +[cgo] stderr -count=2 '^cgo'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main/withcgo"$' +[cgo] stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main/withcgo"$' +stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main"$' +stderr -count=1 '^link'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main"$' + +# Test packages are a little bit trickier. +# We have four variants of test/main, as reported by 'go list -test': +# +# test/main - the regular non-test package +# test/main.test - the generated test program +# test/main [test/main.test] - the test package for foo_test.go +# test/main_test [test/main.test] - the test package for foo_separate_test.go +# +# As such, TOOLEXEC_IMPORTPATH must see the same strings, to be able to uniquely +# identify each package being built as reported by 'go list -f {{.ImportPath}}'. +# Note that these are not really "import paths" anymore, but that naming is +# consistent with 'go list -json' at least. + +go test -toolexec=$PWD/mytool + +stderr -count=2 '^# test/main\.test$' +stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main\.test"$' +stderr -count=1 '^link'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main\.test"$' + +stderr -count=1 '^# test/main \[test/main\.test\]$' +stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main \[test/main\.test\]"$' + +stderr -count=1 '^# test/main_test \[test/main\.test\]$' +stderr -count=1 '^compile'${GOEXE}' TOOLEXEC_IMPORTPATH="test/main_test \[test/main\.test\]"$' -- go.mod -- module test/main @@ -32,6 +57,18 @@ import ( ) func main() {} +-- foo_test.go -- +package main + +import "testing" + +func TestFoo(t *testing.T) {} +-- foo_separate_test.go -- +package main_test + +import "testing" + +func TestSeparateFoo(t *testing.T) {} -- withcgo/withcgo.go -- package withcgo @@ -71,7 +108,7 @@ func main() { // We can't alter the version output. } else { // Print which tool we're running, and on what package. - fmt.Fprintf(os.Stdout, "%s TOOLEXEC_IMPORTPATH=%s\n", toolName, os.Getenv("TOOLEXEC_IMPORTPATH")) + fmt.Fprintf(os.Stdout, "%s TOOLEXEC_IMPORTPATH=%q\n", toolName, os.Getenv("TOOLEXEC_IMPORTPATH")) } // Simply run the tool. diff --git a/src/cmd/go/testdata/script/version_goexperiment.txt b/src/cmd/go/testdata/script/version_goexperiment.txt new file mode 100644 index 00000000000..4b165eb6055 --- /dev/null +++ b/src/cmd/go/testdata/script/version_goexperiment.txt @@ -0,0 +1,16 @@ +# Test that experiments appear in "go version " + +# This test requires rebuilding the runtime, which takes a while. 
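+#
+# (Background for the checks below: with GOEXPERIMENT set, runtime.Version()
+# and 'go version' report each enabled experiment as an 'X:name' suffix on the
+# version string, e.g. 'go1.17 X:fieldtrack', which is what the stdout and
+# stderr patterns match.)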
+[short] skip + +env GOEXPERIMENT=fieldtrack +go build -o main$GOEXE version.go +go version main$GOEXE +stdout 'X:fieldtrack$' +exec ./main$GOEXE +stderr 'X:fieldtrack$' + +-- version.go -- +package main +import "runtime" +func main() { println(runtime.Version()) } diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go index b82aa7e7a93..b3c120daab4 100644 --- a/src/cmd/gofmt/gofmt.go +++ b/src/cmd/gofmt/gofmt.go @@ -26,13 +26,12 @@ import ( var ( // main operation modes - list = flag.Bool("l", false, "list files whose formatting differs from gofmt's") - write = flag.Bool("w", false, "write result to (source) file instead of stdout") - rewriteRule = flag.String("r", "", "rewrite rule (e.g., 'a[b:len(a)] -> a[b:]')") - simplifyAST = flag.Bool("s", false, "simplify code") - doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") - allErrors = flag.Bool("e", false, "report all errors (not just the first 10 on different lines)") - allowTypeParams = flag.Bool("G", false, "allow generic code") + list = flag.Bool("l", false, "list files whose formatting differs from gofmt's") + write = flag.Bool("w", false, "write result to (source) file instead of stdout") + rewriteRule = flag.String("r", "", "rewrite rule (e.g., 'a[b:len(a)] -> a[b:]')") + simplifyAST = flag.Bool("s", false, "simplify code") + doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") + allErrors = flag.Bool("e", false, "report all errors (not just the first 10 on different lines)") // debugging cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") @@ -72,9 +71,6 @@ func initParserMode() { if *allErrors { parserMode |= parser.AllErrors } - if *allowTypeParams { - parserMode |= parser.ParseTypeParams - } } func isGoFile(f fs.DirEntry) bool { @@ -155,7 +151,7 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error if err != nil { return fmt.Errorf("computing diff: %s", err) } - fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) + fmt.Fprintf(out, "diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) out.Write(data) } } @@ -168,21 +164,15 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error } func visitFile(path string, f fs.DirEntry, err error) error { - if err == nil && isGoFile(f) { - err = processFile(path, nil, os.Stdout, false) + if err != nil || !isGoFile(f) { + return err } - // Don't complain if a file was deleted in the meantime (i.e. - // the directory changed concurrently while running gofmt). - if err != nil && !os.IsNotExist(err) { + if err := processFile(path, nil, os.Stdout, false); err != nil { report(err) } return nil } -func walkDir(path string) { - filepath.WalkDir(path, visitFile) -} - func main() { // call gofmtMain in a separate function // so that it can use defer and have them @@ -210,7 +200,8 @@ func gofmtMain() { initParserMode() initRewrite() - if flag.NArg() == 0 { + args := flag.Args() + if len(args) == 0 { if *write { fmt.Fprintln(os.Stderr, "error: cannot use -w with standard input") exitCode = 2 @@ -222,15 +213,18 @@ func gofmtMain() { return } - for i := 0; i < flag.NArg(); i++ { - path := flag.Arg(i) - switch dir, err := os.Stat(path); { + for _, arg := range args { + switch info, err := os.Stat(arg); { case err != nil: report(err) - case dir.IsDir(): - walkDir(path) + case !info.IsDir(): + // Non-directory arguments are always formatted. 
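+			// (Per-file errors are passed to report, which prints them and
+			// sets a nonzero exit code, so one bad argument does not stop the
+			// remaining arguments from being formatted.)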
+ if err := processFile(arg, nil, os.Stdout, false); err != nil { + report(err) + } default: - if err := processFile(path, nil, os.Stdout, false); err != nil { + // Directories are walked, ignoring non-Go files. + if err := filepath.WalkDir(arg, visitFile); err != nil { report(err) } } diff --git a/src/cmd/gofmt/gofmt_test.go b/src/cmd/gofmt/gofmt_test.go index 60e4f2e03d2..f0d3f8780f4 100644 --- a/src/cmd/gofmt/gofmt_test.go +++ b/src/cmd/gofmt/gofmt_test.go @@ -49,12 +49,13 @@ func gofmtFlags(filename string, maxLines int) string { case scanner.EOF: return "" } - } return "" } +var typeParamsEnabled = false + func runTest(t *testing.T, in, out string) { // process flags *simplifyAST = false @@ -78,7 +79,10 @@ func runTest(t *testing.T, in, out string) { // fake flag - pretend input is from stdin stdin = true case "-G": - *allowTypeParams = true + // fake flag - test is for generic code + if !typeParamsEnabled { + return + } default: t.Errorf("unrecognized flag name: %s", name) } diff --git a/src/cmd/gofmt/gofmt_typeparams_test.go b/src/cmd/gofmt/gofmt_typeparams_test.go new file mode 100644 index 00000000000..10641a77cb2 --- /dev/null +++ b/src/cmd/gofmt/gofmt_typeparams_test.go @@ -0,0 +1,12 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build typeparams +// +build typeparams + +package main + +func init() { + typeParamsEnabled = true +} diff --git a/src/cmd/internal/archive/archive.go b/src/cmd/internal/archive/archive.go index e9b25fe240a..da1f2932435 100644 --- a/src/cmd/internal/archive/archive.go +++ b/src/cmd/internal/archive/archive.go @@ -106,6 +106,12 @@ var ( errNotObject = errors.New("unrecognized object file format") ) +type ErrGoObjOtherVersion struct{ magic []byte } + +func (e ErrGoObjOtherVersion) Error() string { + return fmt.Sprintf("go object of a different version: %q", e.magic) +} + // An objReader is an object file reader. type objReader struct { a *Archive @@ -389,7 +395,7 @@ func (r *objReader) parseArchive(verbose bool) error { // The object file consists of a textual header ending in "\n!\n" // and then the part we want to parse begins. // The format of that part is defined in a comment at the top -// of src/liblink/objfile.c. +// of cmd/internal/goobj/objfile.go. 
func (r *objReader) parseObject(o *GoObj, size int64) error { h := make([]byte, 0, 256) var c1, c2, c3 byte @@ -418,6 +424,9 @@ func (r *objReader) parseObject(o *GoObj, size int64) error { return err } if !bytes.Equal(p, []byte(goobj.Magic)) { + if bytes.HasPrefix(p, []byte("\x00go1")) && bytes.HasSuffix(p, []byte("ld")) { + return r.error(ErrGoObjOtherVersion{p[1:]}) // strip the \x00 byte + } return r.error(errCorruptObject) } r.skip(o.Size) diff --git a/src/cmd/internal/archive/archive_test.go b/src/cmd/internal/archive/archive_test.go index cb4eb842b45..c284a9cf0dc 100644 --- a/src/cmd/internal/archive/archive_test.go +++ b/src/cmd/internal/archive/archive_test.go @@ -173,7 +173,7 @@ func TestParseGoobj(t *testing.T) { continue } if e.Type != EntryGoObj { - t.Errorf("wrong type of object: wnat EntryGoObj, got %v", e.Type) + t.Errorf("wrong type of object: want EntryGoObj, got %v", e.Type) } if !bytes.Contains(e.Obj.TextHeader, []byte(runtime.GOARCH)) { t.Errorf("text header does not contain GOARCH %s: %q", runtime.GOARCH, e.Obj.TextHeader) @@ -204,7 +204,7 @@ func TestParseArchive(t *testing.T) { continue } if e.Type != EntryGoObj { - t.Errorf("wrong type of object: wnat EntryGoObj, got %v", e.Type) + t.Errorf("wrong type of object: want EntryGoObj, got %v", e.Type) } if !bytes.Contains(e.Obj.TextHeader, []byte(runtime.GOARCH)) { t.Errorf("text header does not contain GOARCH %s: %q", runtime.GOARCH, e.Obj.TextHeader) diff --git a/src/cmd/internal/bio/buf_mmap.go b/src/cmd/internal/bio/buf_mmap.go index 4b43d74f269..b9755c7e50e 100644 --- a/src/cmd/internal/bio/buf_mmap.go +++ b/src/cmd/internal/bio/buf_mmap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd // +build darwin dragonfly freebsd linux netbsd openbsd package bio diff --git a/src/cmd/internal/bio/buf_nommap.go b/src/cmd/internal/bio/buf_nommap.go index f43c67ac2d8..533a93180cd 100644 --- a/src/cmd/internal/bio/buf_nommap.go +++ b/src/cmd/internal/bio/buf_nommap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd package bio diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go index 8de4096f068..ec441c2bcb6 100644 --- a/src/cmd/internal/dwarf/dwarf.go +++ b/src/cmd/internal/dwarf/dwarf.go @@ -9,13 +9,15 @@ package dwarf import ( "bytes" - "cmd/internal/objabi" "errors" "fmt" + "internal/buildcfg" exec "internal/execabs" "sort" "strconv" "strings" + + "cmd/internal/objabi" ) // InfoPrefix is the prefix for all the symbols containing DWARF info entries. @@ -318,8 +320,6 @@ const ( ) // Index into the abbrevs table below. -// Keep in sync with ispubname() and ispubtype() in ld/dwarf.go. 
-// ispubtype considers >= NULLTYPE public const ( DW_ABRV_NULL = iota DW_ABRV_COMPUNIT @@ -383,7 +383,7 @@ func expandPseudoForm(form uint8) uint8 { return form } expandedForm := DW_FORM_udata - if objabi.GOOS == "darwin" || objabi.GOOS == "ios" { + if buildcfg.GOOS == "darwin" || buildcfg.GOOS == "ios" { expandedForm = DW_FORM_data4 } return uint8(expandedForm) @@ -1043,6 +1043,15 @@ func PutIntConst(ctxt Context, info, typ Sym, name string, val int64) { putattr(ctxt, info, DW_ABRV_INT_CONSTANT, DW_FORM_sdata, DW_CLS_CONSTANT, val, nil) } +// PutGlobal writes a DIE for a global variable. +func PutGlobal(ctxt Context, info, typ, gvar Sym, name string) { + Uleb128put(ctxt, info, DW_ABRV_VARIABLE) + putattr(ctxt, info, DW_ABRV_VARIABLE, DW_FORM_string, DW_CLS_STRING, int64(len(name)), name) + putattr(ctxt, info, DW_ABRV_VARIABLE, DW_FORM_block1, DW_CLS_ADDRESS, 0, gvar) + putattr(ctxt, info, DW_ABRV_VARIABLE, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, typ) + putattr(ctxt, info, DW_ABRV_VARIABLE, DW_FORM_flag, DW_CLS_FLAG, 1, nil) +} + // PutBasedRanges writes a range table to sym. All addresses in ranges are // relative to some base address, which must be arranged by the caller // (e.g., with a DW_AT_low_pc attribute, or in a BASE-prefixed range). @@ -1257,7 +1266,7 @@ func PutAbstractFunc(ctxt Context, s *FnState) error { // its corresponding 'abstract' DIE (containing location-independent // attributes such as name, type, etc). Inlined subroutine DIEs can // have other inlined subroutine DIEs as children. -func PutInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error { +func putInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error { ic := s.InlCalls.Calls[callIdx] callee := ic.AbsFunSym @@ -1268,7 +1277,7 @@ func PutInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error Uleb128put(ctxt, s.Info, int64(abbrev)) if logDwarf { - ctxt.Logf("PutInlinedFunc(caller=%v,callee=%v,abbrev=%d)\n", callersym, callee, abbrev) + ctxt.Logf("putInlinedFunc(caller=%v,callee=%v,abbrev=%d)\n", callersym, callee, abbrev) } // Abstract origin. @@ -1304,7 +1313,7 @@ func PutInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error // Children of this inline. for _, sib := range inlChildren(callIdx, &s.InlCalls) { absfn := s.InlCalls.Calls[sib].AbsFunSym - err := PutInlinedFunc(ctxt, s, absfn, sib) + err := putInlinedFunc(ctxt, s, absfn, sib) if err != nil { return err } @@ -1346,7 +1355,7 @@ func PutConcreteFunc(ctxt Context, s *FnState) error { // Inlined subroutines. for _, sib := range inlChildren(-1, &s.InlCalls) { absfn := s.InlCalls.Calls[sib].AbsFunSym - err := PutInlinedFunc(ctxt, s, absfn, sib) + err := putInlinedFunc(ctxt, s, absfn, sib) if err != nil { return err } @@ -1394,7 +1403,7 @@ func PutDefaultFunc(ctxt Context, s *FnState) error { // Inlined subroutines. for _, sib := range inlChildren(-1, &s.InlCalls) { absfn := s.InlCalls.Calls[sib].AbsFunSym - err := PutInlinedFunc(ctxt, s, absfn, sib) + err := putInlinedFunc(ctxt, s, absfn, sib) if err != nil { return err } @@ -1600,14 +1609,6 @@ func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int, // Var has no children => no terminator } -// VarsByOffset attaches the methods of sort.Interface to []*Var, -// sorting in increasing StackOffset. 
-type VarsByOffset []*Var - -func (s VarsByOffset) Len() int { return len(s) } -func (s VarsByOffset) Less(i, j int) bool { return s[i].StackOffset < s[j].StackOffset } -func (s VarsByOffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // byChildIndex implements sort.Interface for []*dwarf.Var by child index. type byChildIndex []*Var diff --git a/src/cmd/internal/goobj/builtinlist.go b/src/cmd/internal/goobj/builtinlist.go index 0cca7523329..9f248137daa 100644 --- a/src/cmd/internal/goobj/builtinlist.go +++ b/src/cmd/internal/goobj/builtinlist.go @@ -41,6 +41,7 @@ var builtins = [...]struct { {"runtime.printcomplex", 1}, {"runtime.printstring", 1}, {"runtime.printpointer", 1}, + {"runtime.printuintptr", 1}, {"runtime.printiface", 1}, {"runtime.printeface", 1}, {"runtime.printslice", 1}, @@ -61,7 +62,6 @@ var builtins = [...]struct { {"runtime.stringtoslicebyte", 1}, {"runtime.stringtoslicerune", 1}, {"runtime.slicecopy", 1}, - {"runtime.slicestringcopy", 1}, {"runtime.decoderune", 1}, {"runtime.countrunes", 1}, {"runtime.convI2I", 1}, @@ -122,7 +122,6 @@ var builtins = [...]struct { {"runtime.typedslicecopy", 1}, {"runtime.selectnbsend", 1}, {"runtime.selectnbrecv", 1}, - {"runtime.selectnbrecv2", 1}, {"runtime.selectsetpc", 1}, {"runtime.selectgo", 1}, {"runtime.block", 1}, @@ -172,8 +171,9 @@ var builtins = [...]struct { {"runtime.uint64tofloat64", 1}, {"runtime.uint32tofloat64", 1}, {"runtime.complex128div", 1}, + {"runtime.getcallerpc", 1}, + {"runtime.getcallersp", 1}, {"runtime.racefuncenter", 1}, - {"runtime.racefuncenterfp", 1}, {"runtime.racefuncexit", 1}, {"runtime.raceread", 1}, {"runtime.racewrite", 1}, @@ -181,6 +181,7 @@ var builtins = [...]struct { {"runtime.racewriterange", 1}, {"runtime.msanread", 1}, {"runtime.msanwrite", 1}, + {"runtime.msanmove", 1}, {"runtime.checkptrAlignment", 1}, {"runtime.checkptrArithmetic", 1}, {"runtime.libfuzzerTraceCmp1", 1}, diff --git a/src/cmd/internal/goobj/mkbuiltin.go b/src/cmd/internal/goobj/mkbuiltin.go index 22608e7e699..18b969586cc 100644 --- a/src/cmd/internal/goobj/mkbuiltin.go +++ b/src/cmd/internal/goobj/mkbuiltin.go @@ -2,9 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore -// Generate builtinlist.go from cmd/compile/internal/gc/builtin/runtime.go. +// Generate builtinlist.go from cmd/compile/internal/typecheck/builtin/runtime.go. package main @@ -53,7 +54,7 @@ func main() { func mkbuiltin(w io.Writer) { pkg := "runtime" fset := token.NewFileSet() - path := filepath.Join("..", "..", "compile", "internal", "gc", "builtin", "runtime.go") + path := filepath.Join("..", "..", "compile", "internal", "typecheck", "builtin", "runtime.go") f, err := parser.ParseFile(fset, path, nil, 0) if err != nil { log.Fatal(err) diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go index d1b838f676c..e2858bd57da 100644 --- a/src/cmd/internal/goobj/objfile.go +++ b/src/cmd/internal/goobj/objfile.go @@ -33,7 +33,7 @@ import ( // New object file format. 
// // Header struct { -// Magic [...]byte // "\x00go116ld" +// Magic [...]byte // "\x00go117ld" // Fingerprint [8]byte // Flags uint32 // Offsets [...]uint32 // byte offset of each block below @@ -89,7 +89,7 @@ import ( // Relocs [...]struct { // Off int32 // Size uint8 -// Type uint8 +// Type uint16 // Add int64 // Sym symRef // } @@ -219,7 +219,7 @@ type Header struct { Offsets [NBlk]uint32 } -const Magic = "\x00go116ld" +const Magic = "\x00go117ld" func (h *Header) Write(w *Writer) { w.RawString(h.Magic) @@ -373,32 +373,32 @@ const HashSize = sha1.Size // Reloc struct { // Off int32 // Siz uint8 -// Type uint8 +// Type uint16 // Add int64 // Sym SymRef // } type Reloc [RelocSize]byte -const RelocSize = 4 + 1 + 1 + 8 + 8 +const RelocSize = 4 + 1 + 2 + 8 + 8 -func (r *Reloc) Off() int32 { return int32(binary.LittleEndian.Uint32(r[:])) } -func (r *Reloc) Siz() uint8 { return r[4] } -func (r *Reloc) Type() uint8 { return r[5] } -func (r *Reloc) Add() int64 { return int64(binary.LittleEndian.Uint64(r[6:])) } +func (r *Reloc) Off() int32 { return int32(binary.LittleEndian.Uint32(r[:])) } +func (r *Reloc) Siz() uint8 { return r[4] } +func (r *Reloc) Type() uint16 { return binary.LittleEndian.Uint16(r[5:]) } +func (r *Reloc) Add() int64 { return int64(binary.LittleEndian.Uint64(r[7:])) } func (r *Reloc) Sym() SymRef { - return SymRef{binary.LittleEndian.Uint32(r[14:]), binary.LittleEndian.Uint32(r[18:])} + return SymRef{binary.LittleEndian.Uint32(r[15:]), binary.LittleEndian.Uint32(r[19:])} } -func (r *Reloc) SetOff(x int32) { binary.LittleEndian.PutUint32(r[:], uint32(x)) } -func (r *Reloc) SetSiz(x uint8) { r[4] = x } -func (r *Reloc) SetType(x uint8) { r[5] = x } -func (r *Reloc) SetAdd(x int64) { binary.LittleEndian.PutUint64(r[6:], uint64(x)) } +func (r *Reloc) SetOff(x int32) { binary.LittleEndian.PutUint32(r[:], uint32(x)) } +func (r *Reloc) SetSiz(x uint8) { r[4] = x } +func (r *Reloc) SetType(x uint16) { binary.LittleEndian.PutUint16(r[5:], x) } +func (r *Reloc) SetAdd(x int64) { binary.LittleEndian.PutUint64(r[7:], uint64(x)) } func (r *Reloc) SetSym(x SymRef) { - binary.LittleEndian.PutUint32(r[14:], x.PkgIdx) - binary.LittleEndian.PutUint32(r[18:], x.SymIdx) + binary.LittleEndian.PutUint32(r[15:], x.PkgIdx) + binary.LittleEndian.PutUint32(r[19:], x.SymIdx) } -func (r *Reloc) Set(off int32, size uint8, typ uint8, add int64, sym SymRef) { +func (r *Reloc) Set(off int32, size uint8, typ uint16, add int64, sym SymRef) { r.SetOff(off) r.SetSiz(size) r.SetType(typ) @@ -481,7 +481,7 @@ func (r *RefFlags) SetFlag2(x uint8) { r[9] = x } func (r *RefFlags) Write(w *Writer) { w.Bytes(r[:]) } -// Used to construct an artifically large array type when reading an +// Used to construct an artificially large array type when reading an // item from the object file relocs section or aux sym section (needs // to work on 32-bit as well as 64-bit). See issue 41621. 
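[Editor's note: the widened relocation type changes the on-disk record layout. Type now occupies bytes 5-6, pushing Add to byte 7 and the two SymRef words to bytes 15 and 19, for a 23-byte record. The sketch below is a standalone illustration of that fixed-offset layout under little-endian encoding; it does not import cmd/internal/goobj, and the lowercase names are hypothetical stand-ins for the accessors in the diff.]

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// relocSize mirrors the layout in the diff: Off(4) + Siz(1) + Type(2) + Add(8) + Sym(8).
const relocSize = 4 + 1 + 2 + 8 + 8

type reloc [relocSize]byte

func (r *reloc) setOff(x int32)   { binary.LittleEndian.PutUint32(r[0:], uint32(x)) }
func (r *reloc) setSiz(x uint8)   { r[4] = x }
func (r *reloc) setType(x uint16) { binary.LittleEndian.PutUint16(r[5:], x) }
func (r *reloc) setAdd(x int64)   { binary.LittleEndian.PutUint64(r[7:], uint64(x)) }

func main() {
	var r reloc
	r.setOff(12)
	r.setSiz(4)
	r.setType(0x0123) // two bytes now, so relocation types can exceed 255
	r.setAdd(54321)
	fmt.Printf("type bytes at [5:7]: %x, add bytes at [7:15]: %x\n", r[5:7], r[7:15])
}
```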
const huge = (1<<31 - 1) / RelocSize diff --git a/src/cmd/internal/goobj/objfile_test.go b/src/cmd/internal/goobj/objfile_test.go index c6fd427c150..ed942aa9344 100644 --- a/src/cmd/internal/goobj/objfile_test.go +++ b/src/cmd/internal/goobj/objfile_test.go @@ -9,6 +9,12 @@ import ( "bytes" "cmd/internal/bio" "cmd/internal/objabi" + "fmt" + "internal/buildcfg" + "internal/testenv" + "io/ioutil" + "os" + "os/exec" "testing" ) @@ -35,7 +41,7 @@ func TestReadWrite(t *testing.T) { var r Reloc r.SetOff(12) r.SetSiz(4) - r.SetType(uint8(objabi.R_ADDR)) + r.SetType(uint16(objabi.R_ADDR)) r.SetAdd(54321) r.SetSym(SymRef{11, 22}) r.Write(w) @@ -58,7 +64,7 @@ func TestReadWrite(t *testing.T) { b = b[SymSize:] var r2 Reloc r2.fromBytes(b) - if r2.Off() != 12 || r2.Siz() != 4 || r2.Type() != uint8(objabi.R_ADDR) || r2.Add() != 54321 || r2.Sym() != (SymRef{11, 22}) { + if r2.Off() != 12 || r2.Siz() != 4 || r2.Type() != uint16(objabi.R_ADDR) || r2.Add() != 54321 || r2.Sym() != (SymRef{11, 22}) { t.Errorf("read Reloc2 mismatch: got %v %v %v %v %v", r2.Off(), r2.Siz(), r2.Type(), r2.Add(), r2.Sym()) } @@ -69,3 +75,60 @@ func TestReadWrite(t *testing.T) { t.Errorf("read Aux2 mismatch: got %v %v", a2.Type(), a2.Sym()) } } + +var issue41621prolog = ` +package main +var lines = []string{ +` + +var issue41621epilog = ` +} +func getLines() []string { + return lines +} +func main() { + println(getLines()) +} +` + +func TestIssue41621LargeNumberOfRelocations(t *testing.T) { + if testing.Short() || (buildcfg.GOARCH != "amd64") { + t.Skipf("Skipping large number of relocations test in short mode or on %s", buildcfg.GOARCH) + } + testenv.MustHaveGoBuild(t) + + tmpdir, err := ioutil.TempDir("", "lotsofrelocs") + if err != nil { + t.Fatalf("can't create temp directory: %v\n", err) + } + defer os.RemoveAll(tmpdir) + + // Emit testcase. + var w bytes.Buffer + fmt.Fprintf(&w, issue41621prolog) + for i := 0; i < 1048576+13; i++ { + fmt.Fprintf(&w, "\t\"%d\",\n", i) + } + fmt.Fprintf(&w, issue41621epilog) + err = ioutil.WriteFile(tmpdir+"/large.go", w.Bytes(), 0666) + if err != nil { + t.Fatalf("can't write output: %v\n", err) + } + + // Emit go.mod + w.Reset() + fmt.Fprintf(&w, "module issue41621\n\ngo 1.12\n") + err = ioutil.WriteFile(tmpdir+"/go.mod", w.Bytes(), 0666) + if err != nil { + t.Fatalf("can't write output: %v\n", err) + } + w.Reset() + + // Build. + cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "large") + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Build failed: %v, output: %s", err, out) + } +} diff --git a/src/cmd/internal/moddeps/moddeps_test.go b/src/cmd/internal/moddeps/moddeps_test.go index cba401c896a..ba574f40049 100644 --- a/src/cmd/internal/moddeps/moddeps_test.go +++ b/src/cmd/internal/moddeps/moddeps_test.go @@ -61,7 +61,7 @@ func TestAllDependencies(t *testing.T) { _, err := cmd.Output() if err != nil { t.Errorf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr) - t.Logf("(Run 'go mod vendor' in %s to ensure that dependecies have been vendored.)", m.Dir) + t.Logf("(Run 'go mod vendor' in %s to ensure that dependencies have been vendored.)", m.Dir) } return } @@ -163,8 +163,8 @@ func TestAllDependencies(t *testing.T) { Env: append(os.Environ(), // Set GOROOT. "GOROOT="+gorootCopyDir, - // Explicitly clear PWD and GOROOT_FINAL so that GOROOT=gorootCopyDir is definitely used. - "PWD=", + // Explicitly override PWD and clear GOROOT_FINAL so that GOROOT=gorootCopyDir is definitely used. 
+ "PWD="+filepath.Join(gorootCopyDir, rel), "GOROOT_FINAL=", // Add GOROOTcopy/bin and bundleDir to front of PATH. "PATH="+filepath.Join(gorootCopyDir, "bin")+string(filepath.ListSeparator)+ @@ -179,7 +179,7 @@ func TestAllDependencies(t *testing.T) { r.run(t, goBinCopy, "generate", `-run=^//go:generate bundle `, pkgs) // See issue 41409. advice := "$ cd " + m.Dir + "\n" + "$ go mod tidy # to remove extraneous dependencies\n" + - "$ go mod vendor # to vendor dependecies\n" + + "$ go mod vendor # to vendor dependencies\n" + "$ go generate -run=bundle " + pkgs + " # to regenerate bundled packages\n" if m.Path == "std" { r.run(t, goBinCopy, "generate", "syscall", "internal/syscall/...") // See issue 43440. @@ -422,6 +422,12 @@ func findGorootModules(t *testing.T) []gorootModule { // running time of this test anyway.) return filepath.SkipDir } + if strings.HasPrefix(info.Name(), "_") || strings.HasPrefix(info.Name(), ".") { + // _ and . prefixed directories can be used for internal modules + // without a vendor directory that don't contribute to the build + // but might be used for example as code generators. + return filepath.SkipDir + } if info.IsDir() || info.Name() != "go.mod" { return nil } diff --git a/src/cmd/internal/obj/arm/a.out.go b/src/cmd/internal/obj/arm/a.out.go index a1d9e28b960..fd695ad0c98 100644 --- a/src/cmd/internal/obj/arm/a.out.go +++ b/src/cmd/internal/obj/arm/a.out.go @@ -163,8 +163,8 @@ const ( C_SFCON C_LFCON - C_RACON - C_LACON + C_RACON /* <=0xff rotated constant offset from auto */ + C_LACON /* Large Auto CONstant, i.e. large offset from SP */ C_SBRA C_LBRA diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index ebb98b4859d..ccf5f9e7f8d 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -34,6 +34,7 @@ import ( "cmd/internal/obj" "cmd/internal/objabi" "fmt" + "internal/buildcfg" "log" "math" "sort" @@ -976,7 +977,7 @@ func (c *ctxt5) aclass(a *obj.Addr) int { if immrot(^uint32(c.instoffset)) != 0 { return C_NCON } - if uint32(c.instoffset) <= 0xffff && objabi.GOARM == 7 { + if uint32(c.instoffset) <= 0xffff && buildcfg.GOARM == 7 { return C_SCON } if x, y := immrot2a(uint32(c.instoffset)); x != 0 && y != 0 { @@ -3044,7 +3045,7 @@ func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { func (c *ctxt5) chipzero5(e float64) int { // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions. - if objabi.GOARM < 7 || math.Float64bits(e) != 0 { + if buildcfg.GOARM < 7 || math.Float64bits(e) != 0 { return -1 } return 0 @@ -3052,7 +3053,7 @@ func (c *ctxt5) chipzero5(e float64) int { func (c *ctxt5) chipfloat5(e float64) int { // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions. - if objabi.GOARM < 7 { + if buildcfg.GOARM < 7 { return -1 } diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go index 7de04302d91..1454d8a7c92 100644 --- a/src/cmd/internal/obj/arm/obj5.go +++ b/src/cmd/internal/obj/arm/obj5.go @@ -34,6 +34,7 @@ import ( "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/sys" + "internal/buildcfg" "log" ) @@ -64,7 +65,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line()) } - if objabi.GOARM < 7 { + if buildcfg.GOARM < 7 { // Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension. 
if progedit_tlsfallback == nil { progedit_tlsfallback = ctxt.Lookup("runtime.read_tls_fallback") @@ -680,57 +681,37 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.From.Reg = REG_R1 p.Reg = REG_R2 } else { - // Such a large stack we need to protect against wraparound - // if SP is close to zero. - // SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall) - // The +StackGuard on both sides is required to keep the left side positive: - // SP is allowed to be slightly below stackguard. See stack.h. - // CMP $StackPreempt, R1 - // MOVW.NE $StackGuard(SP), R2 - // SUB.NE R1, R2 - // MOVW.NE $(framesize+(StackGuard-StackSmall)), R3 - // CMP.NE R3, R2 - p = obj.Appendp(p, c.newprog) - - p.As = ACMP - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(uint32(objabi.StackPreempt & (1<<32 - 1))) - p.Reg = REG_R1 - - p = obj.Appendp(p, c.newprog) - p.As = AMOVW - p.From.Type = obj.TYPE_ADDR - p.From.Reg = REGSP - p.From.Offset = int64(objabi.StackGuard) - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R2 - p.Scond = C_SCOND_NE + // Such a large stack we need to protect against underflow. + // The runtime guarantees SP > objabi.StackBig, but + // framesize is large enough that SP-framesize may + // underflow, causing a direct comparison with the + // stack guard to incorrectly succeed. We explicitly + // guard against underflow. + // + // // Try subtracting from SP and check for underflow. + // // If this underflows, it sets C to 0. + // SUB.S $(framesize-StackSmall), SP, R2 + // // If C is 1 (unsigned >=), compare with guard. + // CMP.HS stackguard, R2 p = obj.Appendp(p, c.newprog) p.As = ASUB - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R1 + p.Scond = C_SBIT + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(framesize) - objabi.StackSmall + p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 - p.Scond = C_SCOND_NE - - p = obj.Appendp(p, c.newprog) - p.As = AMOVW - p.From.Type = obj.TYPE_ADDR - p.From.Offset = int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall) - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R3 - p.Scond = C_SCOND_NE p = obj.Appendp(p, c.newprog) p.As = ACMP + p.Scond = C_SCOND_HS p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R3 + p.From.Reg = REG_R1 p.Reg = REG_R2 - p.Scond = C_SCOND_NE } - // BLS call-to-morestack + // BLS call-to-morestack (C is 0 or Z is 1) bls := obj.Appendp(p, c.newprog) bls.As = ABLS bls.To.Type = obj.TYPE_BRANCH diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go index 7ab9c1475f7..bf75bb4a891 100644 --- a/src/cmd/internal/obj/arm64/a.out.go +++ b/src/cmd/internal/obj/arm64/a.out.go @@ -420,16 +420,21 @@ const ( C_LBRA C_ZAUTO // 0(RSP) + C_NSAUTO_16 // -256 <= x < 0, 0 mod 16 C_NSAUTO_8 // -256 <= x < 0, 0 mod 8 C_NSAUTO_4 // -256 <= x < 0, 0 mod 4 C_NSAUTO // -256 <= x < 0 + C_NPAUTO_16 // -512 <= x < 0, 0 mod 16 C_NPAUTO // -512 <= x < 0, 0 mod 8 + C_NQAUTO_16 // -1024 <= x < 0, 0 mod 16 C_NAUTO4K // -4095 <= x < 0 + C_PSAUTO_16 // 0 to 255, 0 mod 16 C_PSAUTO_8 // 0 to 255, 0 mod 8 C_PSAUTO_4 // 0 to 255, 0 mod 4 C_PSAUTO // 0 to 255 C_PPAUTO_16 // 0 to 504, 0 mod 16 C_PPAUTO // 0 to 504, 0 mod 8 + C_PQAUTO_16 // 0 to 1008, 0 mod 16 C_UAUTO4K_16 // 0 to 4095, 0 mod 16 C_UAUTO4K_8 // 0 to 4095, 0 mod 8 C_UAUTO4K_4 // 0 to 4095, 0 mod 4 @@ -454,17 +459,22 @@ const ( C_SEXT16 // 0 to 65520 C_LEXT - C_ZOREG // 0(R) - C_NSOREG_8 // must mirror C_NSAUTO_8, etc + C_ZOREG // 0(R) + C_NSOREG_16 // must mirror C_NSAUTO_16, etc + C_NSOREG_8 C_NSOREG_4 C_NSOREG + C_NPOREG_16 
C_NPOREG + C_NQOREG_16 C_NOREG4K + C_PSOREG_16 C_PSOREG_8 C_PSOREG_4 C_PSOREG C_PPOREG_16 C_PPOREG + C_PQOREG_16 C_UOREG4K_16 C_UOREG4K_8 C_UOREG4K_4 @@ -898,6 +908,7 @@ const ( AFDIVD AFDIVS AFLDPD + AFLDPQ AFLDPS AFMOVQ AFMOVD @@ -912,6 +923,7 @@ const ( AFSQRTD AFSQRTS AFSTPD + AFSTPQ AFSTPS AFSUBD AFSUBS @@ -1019,6 +1031,8 @@ const ( AVEXT AVRBIT AVRAX1 + AVUMAX + AVUMIN AVUSHR AVUSHLL AVUSHLL2 diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index a98f8c7ed54..9cc58716488 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ -392,6 +392,7 @@ var Anames = []string{ "FDIVD", "FDIVS", "FLDPD", + "FLDPQ", "FLDPS", "FMOVQ", "FMOVD", @@ -406,6 +407,7 @@ var Anames = []string{ "FSQRTD", "FSQRTS", "FSTPD", + "FSTPQ", "FSTPS", "FSUBD", "FSUBS", @@ -513,6 +515,8 @@ var Anames = []string{ "VEXT", "VRBIT", "VRAX1", + "VUMAX", + "VUMIN", "VUSHR", "VUSHLL", "VUSHLL2", diff --git a/src/cmd/internal/obj/arm64/anames7.go b/src/cmd/internal/obj/arm64/anames7.go index f7e99517cee..2ecd8164b6b 100644 --- a/src/cmd/internal/obj/arm64/anames7.go +++ b/src/cmd/internal/obj/arm64/anames7.go @@ -42,15 +42,21 @@ var cnames7 = []string{ "SBRA", "LBRA", "ZAUTO", + "NSAUTO_16", "NSAUTO_8", "NSAUTO_4", "NSAUTO", + "NPAUTO_16", "NPAUTO", + "NQAUTO_16", "NAUTO4K", + "PSAUTO_16", "PSAUTO_8", "PSAUTO_4", "PSAUTO", + "PPAUTO_16", "PPAUTO", + "PQAUTO_16", "UAUTO4K_16", "UAUTO4K_8", "UAUTO4K_4", @@ -74,15 +80,21 @@ var cnames7 = []string{ "SEXT16", "LEXT", "ZOREG", + "NSOREG_16", "NSOREG_8", "NSOREG_4", "NSOREG", + "NPOREG_16", "NPOREG", + "NQOREG_16", "NOREG4K", + "PSOREG_16", "PSOREG_8", "PSOREG_4", "PSOREG", + "PPOREG_16", "PPOREG", + "PQOREG_16", "UOREG4K_16", "UOREG4K_8", "UOREG4K_4", diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 70072cfba41..575436d764a 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -321,15 +321,17 @@ var optab = []Optab{ {ACMP, C_VCON, C_REG, C_NONE, C_NONE, 13, 20, 0, 0, 0}, {AADD, C_SHIFT, C_REG, C_NONE, C_REG, 3, 4, 0, 0, 0}, {AADD, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, + {AADD, C_SHIFT, C_RSP, C_NONE, C_RSP, 26, 4, 0, 0, 0}, + {AADD, C_SHIFT, C_NONE, C_NONE, C_RSP, 26, 4, 0, 0, 0}, {AMVN, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, {ACMP, C_SHIFT, C_REG, C_NONE, C_NONE, 3, 4, 0, 0, 0}, - {ANEG, C_SHIFT, C_NONE, C_NONE, C_REG, 26, 4, 0, 0, 0}, + {ACMP, C_SHIFT, C_RSP, C_NONE, C_NONE, 26, 4, 0, 0, 0}, + {ANEG, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, {AADD, C_REG, C_RSP, C_NONE, C_RSP, 27, 4, 0, 0, 0}, {AADD, C_REG, C_NONE, C_NONE, C_RSP, 27, 4, 0, 0, 0}, {ACMP, C_REG, C_RSP, C_NONE, C_NONE, 27, 4, 0, 0, 0}, {AADD, C_EXTREG, C_RSP, C_NONE, C_RSP, 27, 4, 0, 0, 0}, {AADD, C_EXTREG, C_NONE, C_NONE, C_RSP, 27, 4, 0, 0, 0}, - {AMVN, C_EXTREG, C_NONE, C_NONE, C_RSP, 27, 4, 0, 0, 0}, {ACMP, C_EXTREG, C_RSP, C_NONE, C_NONE, 27, 4, 0, 0, 0}, {AADD, C_REG, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, {AADD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0, 0, 0}, @@ -404,8 +406,8 @@ var optab = []Optab{ /* MOVs that become MOVK/MOVN/MOVZ/ADD/SUB/OR */ {AMOVW, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, {AMOVD, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, - {AMOVW, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, - {AMOVD, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, + {AMOVW, C_BITCON, C_NONE, C_NONE, C_RSP, 32, 4, 0, 0, 0}, + {AMOVD, C_BITCON, C_NONE, C_NONE, C_RSP, 32, 4, 0, 0, 0}, {AMOVW, C_MOVCON2, C_NONE, 
C_NONE, C_REG, 12, 8, 0, NOTUSETMP, 0}, {AMOVD, C_MOVCON2, C_NONE, C_NONE, C_REG, 12, 8, 0, NOTUSETMP, 0}, {AMOVD, C_MOVCON3, C_NONE, C_NONE, C_REG, 12, 12, 0, NOTUSETMP, 0}, @@ -499,6 +501,8 @@ var optab = []Optab{ {AVMOV, C_REG, C_NONE, C_NONE, C_ELEM, 78, 4, 0, 0, 0}, {AVMOV, C_ARNG, C_NONE, C_NONE, C_ARNG, 83, 4, 0, 0, 0}, {AVDUP, C_ELEM, C_NONE, C_NONE, C_ARNG, 79, 4, 0, 0, 0}, + {AVDUP, C_ELEM, C_NONE, C_NONE, C_VREG, 80, 4, 0, 0, 0}, + {AVDUP, C_REG, C_NONE, C_NONE, C_ARNG, 82, 4, 0, 0, 0}, {AVMOVI, C_ADDCON, C_NONE, C_NONE, C_ARNG, 86, 4, 0, 0, 0}, {AVFMLA, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0}, {AVEXT, C_VCON, C_ARNG, C_ARNG, C_ARNG, 94, 4, 0, 0, 0}, @@ -689,6 +693,46 @@ var optab = []Optab{ /* pre/post-indexed/signed-offset load/store register pair (unscaled, signed 10-bit quad-aligned and long offset) */ + {AFLDPQ, C_NQAUTO_16, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, + {AFLDPQ, C_NQAUTO_16, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, + {AFLDPQ, C_NQAUTO_16, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, + {AFLDPQ, C_PQAUTO_16, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, + {AFLDPQ, C_PQAUTO_16, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, + {AFLDPQ, C_PQAUTO_16, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, + {AFLDPQ, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, + {AFLDPQ, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, + {AFLDPQ, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0}, + {AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, + {AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, + {AFLDPQ, C_NQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, + {AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, + {AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, + {AFLDPQ, C_PQOREG_16, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, + {AFLDPQ, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, + {AFLDPQ, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, + {AFLDPQ, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0}, + {AFLDPQ, C_ADDR, C_NONE, C_NONE, C_PAIR, 88, 12, 0, 0, 0}, + + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQAUTO_16, 67, 4, REGSP, 0, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQAUTO_16, 67, 4, REGSP, 0, C_XPRE}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQAUTO_16, 67, 4, REGSP, 0, C_XPOST}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQAUTO_16, 67, 4, REGSP, 0, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQAUTO_16, 67, 4, REGSP, 0, C_XPRE}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQAUTO_16, 67, 4, REGSP, 0, C_XPOST}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 8, REGSP, 0, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, 67, 4, 0, 0, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, 67, 4, 0, 0, C_XPRE}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NQOREG_16, 67, 4, 0, 0, C_XPOST}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, 67, 4, 0, 0, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, 67, 4, 0, 0, C_XPRE}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_PQOREG_16, 67, 4, 0, 0, C_XPOST}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, 0}, + {AFSTPQ, C_PAIR, C_NONE, C_NONE, C_ADDR, 87, 12, 0, 0, 0}, + {ALDP, C_NPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, {ALDP, C_NPAUTO, C_NONE, C_NONE, C_PAIR, 66, 
4, REGSP, 0, C_XPRE}, {ALDP, C_NPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, @@ -696,14 +740,8 @@ var optab = []Optab{ {ALDP, C_PPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, {ALDP, C_PPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, {ALDP, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, - {ALDP, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPRE}, - {ALDP, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, {ALDP, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, - {ALDP, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPRE}, - {ALDP, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, {ALDP, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0}, - {ALDP, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPRE}, - {ALDP, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPOST}, {ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, {ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, {ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, @@ -711,14 +749,8 @@ var optab = []Optab{ {ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, {ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, {ALDP, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, - {ALDP, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, - {ALDP, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, {ALDP, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, - {ALDP, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, - {ALDP, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, {ALDP, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0}, - {ALDP, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPRE}, - {ALDP, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPOST}, {ALDP, C_ADDR, C_NONE, C_NONE, C_PAIR, 88, 12, 0, 0, 0}, {ASTP, C_PAIR, C_NONE, C_NONE, C_NPAUTO, 67, 4, REGSP, 0, 0}, @@ -728,14 +760,8 @@ var optab = []Optab{ {ASTP, C_PAIR, C_NONE, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, C_XPRE}, {ASTP, C_PAIR, C_NONE, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, C_XPOST}, {ASTP, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPRE}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPOST}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, 0}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, C_XPRE}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 8, REGSP, 0, 0}, {ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPRE}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPOST}, {ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, 0}, {ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, C_XPRE}, {ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, C_XPOST}, @@ -743,14 +769,8 @@ var optab = []Optab{ {ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, 67, 4, 0, 0, C_XPRE}, {ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, 67, 4, 0, 0, C_XPOST}, {ASTP, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPRE}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPOST}, {ASTP, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, 0}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, C_XPRE}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, C_XPOST}, {ASTP, 
C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, 0}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPRE}, - {ASTP, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPOST}, {ASTP, C_PAIR, C_NONE, C_NONE, C_ADDR, 87, 12, 0, 0, 0}, // differ from LDP/STP for C_NSAUTO_4/C_PSAUTO_4/C_NSOREG_4/C_PSOREG_4 @@ -761,14 +781,8 @@ var optab = []Optab{ {ALDPW, C_PSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, {ALDPW, C_PSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, {ALDPW, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, - {ALDPW, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPRE}, - {ALDPW, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, {ALDPW, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, - {ALDPW, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPRE}, - {ALDPW, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, {ALDPW, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0}, - {ALDPW, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPRE}, - {ALDPW, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPOST}, {ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, {ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, {ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, @@ -776,14 +790,8 @@ var optab = []Optab{ {ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, {ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, {ALDPW, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, - {ALDPW, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, - {ALDPW, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, {ALDPW, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, - {ALDPW, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, - {ALDPW, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, {ALDPW, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0}, - {ALDPW, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPRE}, - {ALDPW, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPOST}, {ALDPW, C_ADDR, C_NONE, C_NONE, C_PAIR, 88, 12, 0, 0, 0}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSAUTO_4, 67, 4, REGSP, 0, 0}, @@ -793,14 +801,8 @@ var optab = []Optab{ {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSAUTO_4, 67, 4, REGSP, 0, C_XPRE}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSAUTO_4, 67, 4, REGSP, 0, C_XPOST}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPRE}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPOST}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, 0}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, C_XPRE}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 8, REGSP, 0, 0}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPRE}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPOST}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, 0}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, C_XPRE}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, C_XPOST}, @@ -808,14 +810,8 @@ var optab = []Optab{ {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, 67, 4, 0, 0, C_XPRE}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, 67, 4, 0, 0, C_XPOST}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0}, - {ASTPW, C_PAIR, C_NONE, 
C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPRE}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPOST}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, 0}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, C_XPRE}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, C_XPOST}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, 0}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPRE}, - {ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPOST}, {ASTPW, C_PAIR, C_NONE, C_NONE, C_ADDR, 87, 12, 0, 0, 0}, {ASWPD, C_REG, C_NONE, C_NONE, C_ZOREG, 47, 4, 0, 0, 0}, // RegTo2=C_REG @@ -1204,19 +1200,16 @@ func (c *ctxt7) flushpool(p *obj.Prog, skip int) { // addpool128 adds a 128-bit constant to literal pool by two consecutive DWORD // instructions, the 128-bit constant is formed by ah.Offset<<64+al.Offset. func (c *ctxt7) addpool128(p *obj.Prog, al, ah *obj.Addr) { - lit := al.Offset q := c.newprog() q.As = ADWORD q.To.Type = obj.TYPE_CONST - q.To.Offset = lit - q.Pc = int64(c.pool.size) + q.To.Offset = al.Offset - lit = ah.Offset t := c.newprog() t.As = ADWORD t.To.Type = obj.TYPE_CONST - t.To.Offset = lit - t.Pc = int64(c.pool.size + 8) + t.To.Offset = ah.Offset + q.Link = t if c.blitrl == nil { @@ -1227,6 +1220,7 @@ func (c *ctxt7) addpool128(p *obj.Prog, al, ah *obj.Addr) { } c.elitrl = t + c.pool.size = roundUp(c.pool.size, 16) c.pool.size += 16 p.Pool = q } @@ -1259,88 +1253,8 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { sz = 8 } - switch cls { - // TODO(aram): remove. - default: - if a.Name != obj.NAME_EXTERN { - fmt.Printf("addpool: %v in %v shouldn't go to default case\n", DRconv(cls), p) - } - - t.To.Offset = a.Offset - t.To.Sym = a.Sym - t.To.Type = a.Type - t.To.Name = a.Name - - /* This is here because MOV uint12<<12, R is disabled in optab. - Because of this, we need to load the constant from memory. 
*/ - case C_ADDCON: - fallthrough - - case C_ZAUTO, - C_PSAUTO, - C_PSAUTO_8, - C_PSAUTO_4, - C_PPAUTO_16, - C_PPAUTO, - C_UAUTO4K_16, - C_UAUTO4K_8, - C_UAUTO4K_4, - C_UAUTO4K_2, - C_UAUTO4K, - C_UAUTO8K_16, - C_UAUTO8K_8, - C_UAUTO8K_4, - C_UAUTO8K, - C_UAUTO16K_16, - C_UAUTO16K_8, - C_UAUTO16K, - C_UAUTO32K_16, - C_UAUTO32K, - C_UAUTO64K, - C_NSAUTO_8, - C_NSAUTO_4, - C_NSAUTO, - C_NPAUTO, - C_NAUTO4K, - C_LAUTO, - C_PSOREG, - C_PSOREG_8, - C_PSOREG_4, - C_PPOREG_16, - C_PPOREG, - C_UOREG4K_16, - C_UOREG4K_8, - C_UOREG4K_4, - C_UOREG4K_2, - C_UOREG4K, - C_UOREG8K_16, - C_UOREG8K_8, - C_UOREG8K_4, - C_UOREG8K, - C_UOREG16K_16, - C_UOREG16K_8, - C_UOREG16K, - C_UOREG32K_16, - C_UOREG32K, - C_UOREG64K, - C_NSOREG_8, - C_NSOREG_4, - C_NSOREG, - C_NPOREG, - C_NOREG4K, - C_LOREG, - C_LACON, - C_ADDCON2, - C_LCON, - C_VCON: - if a.Name == obj.NAME_EXTERN { - fmt.Printf("addpool: %v in %v needs reloc\n", DRconv(cls), p) - } - - t.To.Type = obj.TYPE_CONST - t.To.Offset = lit - break - } + t.To.Type = obj.TYPE_CONST + t.To.Offset = lit for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ if q.To == t.To { @@ -1351,7 +1265,6 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { q := c.newprog() *q = *t - q.Pc = int64(c.pool.size) if c.blitrl == nil { c.blitrl = q c.pool.start = uint32(p.Pc) @@ -1359,11 +1272,24 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { c.elitrl.Link = q } c.elitrl = q - c.pool.size = -c.pool.size & (funcAlign - 1) + if q.As == ADWORD { + // make DWORD 8-byte aligned, this is not required by ISA, + // just to avoid performance penalties when loading from + // the constant pool across a cache line. + c.pool.size = roundUp(c.pool.size, 8) + } c.pool.size += uint32(sz) p.Pool = q } +// roundUp rounds up x to "to". 
+func roundUp(x, to uint32) uint32 { + if to == 0 || to&(to-1) != 0 { + log.Fatalf("rounded up to a value that is not a power of 2: %d\n", to) + } + return (x + to - 1) &^ (to - 1) +} + func (c *ctxt7) regoff(a *obj.Addr) uint32 { c.instoffset = 0 c.aclass(a) @@ -1421,6 +1347,22 @@ func isADDWop(op obj.As) bool { return false } +func isADDSop(op obj.As) bool { + switch op { + case AADDS, AADDSW, ASUBS, ASUBSW: + return true + } + return false +} + +func isNEGop(op obj.As) bool { + switch op { + case ANEG, ANEGW, ANEGS, ANEGSW: + return true + } + return false +} + func isRegShiftOrExt(a *obj.Addr) bool { return (a.Index-obj.RBaseARM64)&REG_EXT != 0 || (a.Index-obj.RBaseARM64)&REG_LSL != 0 } @@ -1590,6 +1532,9 @@ func autoclass(l int64) int { } if l < 0 { + if l >= -256 && (l&15) == 0 { + return C_NSAUTO_16 + } if l >= -256 && (l&7) == 0 { return C_NSAUTO_8 } @@ -1599,9 +1544,15 @@ func autoclass(l int64) int { if l >= -256 { return C_NSAUTO } + if l >= -512 && (l&15) == 0 { + return C_NPAUTO_16 + } if l >= -512 && (l&7) == 0 { return C_NPAUTO } + if l >= -1024 && (l&15) == 0 { + return C_NQAUTO_16 + } if l >= -4095 { return C_NAUTO4K } @@ -1609,6 +1560,9 @@ func autoclass(l int64) int { } if l <= 255 { + if (l & 15) == 0 { + return C_PSAUTO_16 + } if (l & 7) == 0 { return C_PSAUTO_8 } @@ -1625,6 +1579,11 @@ func autoclass(l int64) int { return C_PPAUTO } } + if l <= 1008 { + if l&15 == 0 { + return C_PQAUTO_16 + } + } if l <= 4095 { if l&15 == 0 { return C_UAUTO4K_16 @@ -2038,9 +1997,10 @@ func (c *ctxt7) oplook(p *obj.Prog) *Optab { } a1 = a0 + 1 p.From.Class = int8(a1) - // more specific classification of 32-bit integers if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE { - if p.As == AMOVW || isADDWop(p.As) { + if p.As == AMOVW || isADDWop(p.As) || isANDWop(p.As) { + // For 32-bit instruction with constant, we need to + // treat its offset value as 32 bits to classify it. ra0 := c.con32class(&p.From) // do not break C_ADDCON2 when S bit is set if (p.As == AADDSW || p.As == ASUBSW) && ra0 == C_ADDCON2 { @@ -2049,16 +2009,8 @@ func (c *ctxt7) oplook(p *obj.Prog) *Optab { a1 = ra0 + 1 p.From.Class = int8(a1) } - if isANDWop(p.As) && a0 != C_BITCON { - // For 32-bit logical instruction with constant, - // the BITCON test is special in that it looks at - // the 64-bit which has the high 32-bit as a copy - // of the low 32-bit. We have handled that and - // don't pass it to con32class.
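[Editor's note: the roundUp helper added above uses the standard power-of-two trick: adding to-1 and masking with &^ (to - 1) clears the low bits, which is only a correct rounding when to-1 is a contiguous all-ones mask, hence the power-of-two guard. A minimal standalone check, with the helper copied here purely for illustration:]

```go
package main

import "fmt"

// roundUp is a copy of the assembler helper above: it rounds x up to
// the next multiple of to, which must be a power of 2 so that to-1 is
// a contiguous low-bit mask.
func roundUp(x, to uint32) uint32 {
	return (x + to - 1) &^ (to - 1)
}

func main() {
	fmt.Println(roundUp(1, 16))  // 16: a 128-bit pool entry gets 16-byte alignment
	fmt.Println(roundUp(16, 16)) // 16: already-aligned values are unchanged
	fmt.Println(roundUp(20, 8))  // 24
}
```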
- a1 = c.con32class(&p.From) + 1 - p.From.Class = int8(a1) - } if ((p.As == AMOVD) || isANDop(p.As) || isADDop(p.As)) && (a0 == C_LCON || a0 == C_VCON) { + // more specific classification of 64-bit integers a1 = c.con64class(&p.From) + 1 p.From.Class = int8(a1) } @@ -2193,64 +2145,99 @@ func cmp(a int, b int) bool { return true } + case C_NSAUTO_8: + if b == C_NSAUTO_16 { + return true + } + case C_NSAUTO_4: - if b == C_NSAUTO_8 { + if b == C_NSAUTO_16 || b == C_NSAUTO_8 { return true } case C_NSAUTO: switch b { - case C_NSAUTO_4, C_NSAUTO_8: + case C_NSAUTO_4, C_NSAUTO_8, C_NSAUTO_16: + return true + } + + case C_NPAUTO_16: + switch b { + case C_NSAUTO_16: return true } case C_NPAUTO: switch b { - case C_NSAUTO_8: + case C_NSAUTO_16, C_NSAUTO_8, C_NPAUTO_16: + return true + } + + case C_NQAUTO_16: + switch b { + case C_NSAUTO_16, C_NPAUTO_16: return true } case C_NAUTO4K: switch b { - case C_NSAUTO_8, C_NSAUTO_4, C_NSAUTO, C_NPAUTO: + case C_NSAUTO_16, C_NSAUTO_8, C_NSAUTO_4, C_NSAUTO, C_NPAUTO_16, + C_NPAUTO, C_NQAUTO_16: + return true + } + + case C_PSAUTO_16: + if b == C_ZAUTO { return true } case C_PSAUTO_8: - if b == C_ZAUTO { + if b == C_ZAUTO || b == C_PSAUTO_16 { return true } case C_PSAUTO_4: switch b { - case C_ZAUTO, C_PSAUTO_8: + case C_ZAUTO, C_PSAUTO_16, C_PSAUTO_8: return true } case C_PSAUTO: switch b { - case C_ZAUTO, C_PSAUTO_8, C_PSAUTO_4: + case C_ZAUTO, C_PSAUTO_16, C_PSAUTO_8, C_PSAUTO_4: + return true + } + + case C_PPAUTO_16: + switch b { + case C_ZAUTO, C_PSAUTO_16: return true } case C_PPAUTO: switch b { - case C_ZAUTO, C_PSAUTO_8, C_PPAUTO_16: + case C_ZAUTO, C_PSAUTO_16, C_PSAUTO_8, C_PPAUTO_16: + return true + } + + case C_PQAUTO_16: + switch b { + case C_ZAUTO, C_PSAUTO_16, C_PPAUTO_16: return true } case C_UAUTO4K: switch b { - case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, - C_PPAUTO, C_PPAUTO_16, + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, C_PSAUTO_16, + C_PPAUTO, C_PPAUTO_16, C_PQAUTO_16, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO4K_16: return true } case C_UAUTO8K: switch b { - case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, - C_PPAUTO, C_PPAUTO_16, + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, C_PSAUTO_16, + C_PPAUTO, C_PPAUTO_16, C_PQAUTO_16, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO4K_16, C_UAUTO8K_4, C_UAUTO8K_8, C_UAUTO8K_16: return true @@ -2258,8 +2245,8 @@ func cmp(a int, b int) bool { case C_UAUTO16K: switch b { - case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, - C_PPAUTO, C_PPAUTO_16, + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, C_PSAUTO_16, + C_PPAUTO, C_PPAUTO_16, C_PQAUTO_16, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO4K_16, C_UAUTO8K_4, C_UAUTO8K_8, C_UAUTO8K_16, C_UAUTO16K_8, C_UAUTO16K_16: @@ -2268,8 +2255,8 @@ func cmp(a int, b int) bool { case C_UAUTO32K: switch b { - case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, - C_PPAUTO, C_PPAUTO_16, + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, C_PSAUTO_16, + C_PPAUTO, C_PPAUTO_16, C_PQAUTO_16, C_UAUTO4K_8, C_UAUTO4K_16, C_UAUTO8K_8, C_UAUTO8K_16, C_UAUTO16K_8, C_UAUTO16K_16, @@ -2279,17 +2266,17 @@ func cmp(a int, b int) bool { case C_UAUTO64K: switch b { - case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, - C_PPAUTO_16, C_UAUTO4K_16, C_UAUTO8K_16, C_UAUTO16K_16, + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, C_PSAUTO_16, + C_PPAUTO_16, C_PQAUTO_16, C_UAUTO4K_16, C_UAUTO8K_16, C_UAUTO16K_16, C_UAUTO32K_16: return true } case C_LAUTO: switch b { - case C_ZAUTO, C_NSAUTO, C_NSAUTO_4, C_NSAUTO_8, C_NPAUTO, C_NAUTO4K, - C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, - C_PPAUTO, C_PPAUTO_16, + 
case C_ZAUTO, C_NSAUTO, C_NSAUTO_4, C_NSAUTO_8, C_NSAUTO_16, C_NPAUTO_16, C_NPAUTO, C_NQAUTO_16, C_NAUTO4K, + C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, C_PSAUTO_16, + C_PPAUTO, C_PPAUTO_16, C_PQAUTO_16, C_UAUTO4K, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO4K_16, C_UAUTO8K, C_UAUTO8K_4, C_UAUTO8K_8, C_UAUTO8K_16, C_UAUTO16K, C_UAUTO16K_8, C_UAUTO16K_16, @@ -2298,64 +2285,98 @@ func cmp(a int, b int) bool { return true } + case C_NSOREG_8: + if b == C_NSOREG_16 { + return true + } + case C_NSOREG_4: - if b == C_NSOREG_8 { + if b == C_NSOREG_8 || b == C_NSOREG_16 { return true } case C_NSOREG: switch b { - case C_NSOREG_4, C_NSOREG_8: + case C_NSOREG_4, C_NSOREG_8, C_NSOREG_16: + return true + } + + case C_NPOREG_16: + switch b { + case C_NSOREG_16: return true } case C_NPOREG: switch b { - case C_NSOREG_8: + case C_NSOREG_16, C_NSOREG_8, C_NPOREG_16: + return true + } + + case C_NQOREG_16: + switch b { + case C_NSOREG_16, C_NPOREG_16: return true } case C_NOREG4K: switch b { - case C_NSOREG_8, C_NSOREG_4, C_NSOREG, C_NPOREG: + case C_NSOREG_16, C_NSOREG_8, C_NSOREG_4, C_NSOREG, C_NPOREG_16, C_NPOREG, C_NQOREG_16: + return true + } + + case C_PSOREG_16: + if b == C_ZOREG { return true } case C_PSOREG_8: - if b == C_ZOREG { + if b == C_ZOREG || b == C_PSOREG_16 { return true } case C_PSOREG_4: switch b { - case C_ZOREG, C_PSOREG_8: + case C_ZOREG, C_PSOREG_16, C_PSOREG_8: return true } case C_PSOREG: switch b { - case C_ZOREG, C_PSOREG_8, C_PSOREG_4: + case C_ZOREG, C_PSOREG_16, C_PSOREG_8, C_PSOREG_4: + return true + } + + case C_PPOREG_16: + switch b { + case C_ZOREG, C_PSOREG_16: return true } case C_PPOREG: switch b { - case C_ZOREG, C_PSOREG_8, C_PPOREG_16: + case C_ZOREG, C_PSOREG_16, C_PSOREG_8, C_PPOREG_16: + return true + } + + case C_PQOREG_16: + switch b { + case C_ZOREG, C_PSOREG_16, C_PPOREG_16: return true } case C_UOREG4K: switch b { - case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, - C_PPOREG, C_PPOREG_16, + case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG_16, + C_PPOREG, C_PPOREG_16, C_PQOREG_16, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8, C_UOREG4K_16: return true } case C_UOREG8K: switch b { - case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, - C_PPOREG, C_PPOREG_16, + case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG_16, + C_PPOREG, C_PPOREG_16, C_PQOREG_16, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8, C_UOREG4K_16, C_UOREG8K_4, C_UOREG8K_8, C_UOREG8K_16: return true @@ -2363,8 +2384,8 @@ func cmp(a int, b int) bool { case C_UOREG16K: switch b { - case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, - C_PPOREG, C_PPOREG_16, + case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG_16, + C_PPOREG, C_PPOREG_16, C_PQOREG_16, C_UOREG4K_4, C_UOREG4K_8, C_UOREG4K_16, C_UOREG8K_4, C_UOREG8K_8, C_UOREG8K_16, C_UOREG16K_8, C_UOREG16K_16: @@ -2373,8 +2394,8 @@ func cmp(a int, b int) bool { case C_UOREG32K: switch b { - case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, - C_PPOREG, C_PPOREG_16, + case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG_16, + C_PPOREG, C_PPOREG_16, C_PQOREG_16, C_UOREG4K_8, C_UOREG4K_16, C_UOREG8K_8, C_UOREG8K_16, C_UOREG16K_8, C_UOREG16K_16, @@ -2384,17 +2405,17 @@ func cmp(a int, b int) bool { case C_UOREG64K: switch b { - case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, - C_PPOREG_16, C_UOREG4K_16, C_UOREG8K_16, C_UOREG16K_16, + case C_ZOREG, C_PSOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG_16, + C_PPOREG_16, C_PQOREG_16, C_UOREG4K_16, C_UOREG8K_16, C_UOREG16K_16, C_UOREG32K_16: return true } case C_LOREG: switch b { - case C_ZOREG, C_NSOREG, C_NSOREG_4, 
C_NSOREG_8, C_NPOREG, C_NOREG4K, - C_PSOREG, C_PSOREG_4, C_PSOREG_8, - C_PPOREG, C_PPOREG_16, + case C_ZOREG, C_NSOREG, C_NSOREG_4, C_NSOREG_8, C_NSOREG_16, C_NPOREG, C_NPOREG_16, C_NQOREG_16, C_NOREG4K, + C_PSOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG_16, + C_PPOREG, C_PPOREG_16, C_PQOREG_16, C_UOREG4K, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8, C_UOREG4K_16, C_UOREG8K, C_UOREG8K_4, C_UOREG8K_8, C_UOREG8K_16, C_UOREG16K, C_UOREG16K_8, C_UOREG16K_16, @@ -2722,6 +2743,10 @@ func buildop(ctxt *obj.Link) { obj.ATEXT: break + case AFLDPQ: + break + case AFSTPQ: + break case ALDP: oprangeset(AFLDPD, t) @@ -2922,6 +2947,8 @@ func buildop(ctxt *obj.Link) { oprangeset(AVBSL, t) oprangeset(AVBIT, t) oprangeset(AVCMTST, t) + oprangeset(AVUMAX, t) + oprangeset(AVUMIN, t) oprangeset(AVUZP1, t) oprangeset(AVUZP2, t) oprangeset(AVBIF, t) @@ -3204,6 +3231,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) case 2: /* add/sub $(uimm12|uimm24)[,R],R; cmp $(uimm12|uimm24),R */ + if p.To.Reg == REG_RSP && isADDSop(p.As) { + c.ctxt.Diag("illegal destination register: %v\n", p) + } o1 = c.opirr(p, p.As) rt := int(p.To.Reg) @@ -3229,13 +3259,17 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { if is64bit == 0 && amount >= 32 { c.ctxt.Diag("shift amount out of range 0 to 31: %v", p) } + shift := (p.From.Offset >> 22) & 3 + if (shift > 2 || shift < 0) && (isADDop(p.As) || isADDWop(p.As) || isNEGop(p.As)) { + c.ctxt.Diag("unsupported shift operator: %v", p) + } o1 |= uint32(p.From.Offset) /* includes reg, op, etc */ rt := int(p.To.Reg) if p.To.Type == obj.TYPE_NONE { rt = REGZERO } r := int(p.Reg) - if p.As == AMVN || p.As == AMVNW { + if p.As == AMVN || p.As == AMVNW || isNEGop(p.As) { r = REGZERO } else if r == 0 { r = rt @@ -3385,6 +3419,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o4 = os[3] case 13: /* addop $vcon, [R], R (64 bit literal); cmp $lcon,R -> addop $lcon,R, ZR */ + if p.To.Reg == REG_RSP && isADDSop(p.As) { + c.ctxt.Diag("illegal destination register: %v\n", p) + } o := uint32(0) num := uint8(0) cls := oclass(&p.From) @@ -3584,7 +3621,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { v := int32(p.From.Offset) if v < -256 || v > 255 { - c.ctxt.Diag("offset out of range [-255,254]: %v", p) + c.ctxt.Diag("offset out of range [-256,255]: %v", p) } o1 = c.opldr(p, p.As) if o.scond == C_XPOST { @@ -3602,7 +3639,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { v := int32(p.To.Offset) if v < -256 || v > 255 { - c.ctxt.Diag("offset out of range [-255,254]: %v", p) + c.ctxt.Diag("offset out of range [-256,255]: %v", p) } o1 = c.opstr(p, p.As) if o.scond == C_XPOST { @@ -3640,14 +3677,45 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { rt := int(p.To.Reg) o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31) - case 26: /* negX Rm<<s, Rd -> subX Rm<<s, ZR, Rd */ - o1 = c.oprrr(p, p.As) + case 26: /* op R<<n, RSP, RSP */ + amount := (p.From.Offset >> 10) & 63 + shift := (p.From.Offset >> 22) & 3 + if shift != 0 { + c.ctxt.Diag("illegal combination: %v", p) + break + } - o1 |= uint32(p.From.Offset) /* includes reg, op, etc */ + if amount > 4 { + c.ctxt.Diag("the left shift amount out of range 0 to 4: %v", p) + break + } + rf := (p.From.Offset >> 16) & 31 rt := int(p.To.Reg) - o1 |= (REGZERO & 31 << 5) | uint32(rt&31) + r := int(p.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + if r == 0 { + r = rt + } + + o1 = c.opxrrr(p, p.As, false) + o1 |= uint32(rf)<<16 | uint32(amount&7)<<10 | (uint32(r&31) << 5) | uint32(rt&31) case 27: /* op Rm<<n[,Rn],Rd (extended register) */ if (p.From.Reg-obj.RBaseARM64)&REG_EXT != 0 { amount := (p.From.Reg >>
5) & 7 if amount > 4 { @@ -4264,6 +4332,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { if p.Reg == REGTMP { c.ctxt.Diag("cannot use REGTMP as source: %v\n", p) } + if p.To.Reg == REG_RSP && isADDSop(p.As) { + c.ctxt.Diag("illegal destination register: %v\n", p) + } if isADDWop(p.As) || isANDWop(p.As) { o1 = c.omovconst(AMOVW, p, &p.From, REGTMP) } else { @@ -4434,6 +4505,10 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { if af != ARNG_2D && af != ARNG_2S && af != ARNG_4S { c.ctxt.Diag("invalid arrangement: %v", p) } + case AVUMAX, AVUMIN: + if af == ARNG_2D { + c.ctxt.Diag("invalid arrangement: %v", p) + } } switch p.As { case AVAND, AVEOR: @@ -4650,13 +4725,13 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= (uint32(Q&1) << 30) | (uint32(imm5&0x1f) << 16) o1 |= (uint32(rf&31) << 5) | uint32(rt&31) - case 80: /* vmov V.[index], Vn */ + case 80: /* vmov/vdup V.[index], Vn */ rf := int(p.From.Reg) rt := int(p.To.Reg) imm5 := 0 index := int(p.From.Index) switch p.As { - case AVMOV: + case AVMOV, AVDUP: o1 = 1<<30 | 15<<25 | 1<<10 switch (p.From.Reg >> 5) & 15 { case ARNG_B: @@ -4706,7 +4781,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = c.maskOpvldvst(p, o1) o1 |= uint32(r&31) << 5 - case 82: /* vmov Rn, Vd. */ + case 82: /* vmov/vdup Rn, Vd. */ rf := int(p.From.Reg) rt := int(p.To.Reg) o1 = 7<<25 | 3<<10 @@ -4734,7 +4809,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { Q = 1 imm5 = 2 default: - c.ctxt.Diag("invalid arrangement on VMOV Rn, Vd.: %v\n", p) + c.ctxt.Diag("invalid arrangement: %v\n", p) } o1 |= (Q & 1 << 30) | (imm5 & 0x1f << 16) o1 |= (uint32(rf&31) << 5) | uint32(rt&31) @@ -6110,6 +6185,12 @@ func (c *ctxt7) oprrr(p *obj.Prog, a obj.As) uint32 { case AVCMTST: return 0xE<<24 | 1<<21 | 0x23<<10 + case AVUMAX: + return 1<<29 | 7<<25 | 1<<21 | 0x19<<10 + + case AVUMIN: + return 1<<29 | 7<<25 | 1<<21 | 0x1b<<10 + case AVUZP1: return 7<<25 | 3<<11 @@ -6387,11 +6468,10 @@ func (c *ctxt7) opbit(p *obj.Prog, a obj.As) uint32 { func (c *ctxt7) opxrrr(p *obj.Prog, a obj.As, extend bool) uint32 { extension := uint32(0) if !extend { - switch a { - case AADD, ACMN, AADDS, ASUB, ACMP, ASUBS: + if isADDop(a) { extension = LSL0_64 - - case AADDW, ACMNW, AADDSW, ASUBW, ACMPW, ASUBSW: + } + if isADDWop(a) { extension = LSL0_32 } } @@ -7192,7 +7272,7 @@ func (c *ctxt7) opextr(p *obj.Prog, a obj.As, v int32, rn int, rm int, rt int) u return o } -/* genrate instruction encoding for LDP/LDPW/LDPSW/STP/STPW */ +/* generate instruction encoding for ldp and stp series */ func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh, ldp uint32) uint32 { wback := false if o.scond == C_XPOST || o.scond == C_XPRE { @@ -7205,30 +7285,36 @@ func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh, ldp uin if wback == true { c.checkUnpredictable(p, false, true, p.To.Reg, p.From.Reg, int16(p.From.Offset)) } - case AFLDPD, AFLDPS: + case AFLDPD, AFLDPQ, AFLDPS: c.checkUnpredictable(p, true, false, p.From.Reg, p.To.Reg, int16(p.To.Offset)) } var ret uint32 // check offset switch p.As { + case AFLDPQ, AFSTPQ: + if vo < -1024 || vo > 1008 || vo%16 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 16 + ret = 2<<30 | 1<<26 case AFLDPD, AFSTPD: if vo < -512 || vo > 504 || vo%8 != 0 { c.ctxt.Diag("invalid offset %v\n", p) } vo /= 8 ret = 1<<30 | 1<<26 - case ALDP, ASTP: - if vo < -512 || vo > 504 || vo%8 != 0 { - c.ctxt.Diag("invalid offset %v\n", p) - } - vo /= 8 - ret = 2 << 30 case
AFLDPS, AFSTPS: if vo < -256 || vo > 252 || vo%4 != 0 { c.ctxt.Diag("invalid offset %v\n", p) } vo /= 4 ret = 1 << 26 + case ALDP, ASTP: + if vo < -512 || vo > 504 || vo%8 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 8 + ret = 2 << 30 case ALDPW, ASTPW: if vo < -256 || vo > 252 || vo%4 != 0 { c.ctxt.Diag("invalid offset %v\n", p) @@ -7246,7 +7332,7 @@ func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh, ldp uin } // check register pair switch p.As { - case AFLDPD, AFLDPS, AFSTPD, AFSTPS: + case AFLDPQ, AFLDPD, AFLDPS, AFSTPQ, AFSTPD, AFSTPS: if rl < REG_F0 || REG_F31 < rl || rh < REG_F0 || REG_F31 < rh { c.ctxt.Diag("invalid register pair %v\n", p) } diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index 8f7648e5d5e..e41fb3bb753 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -35,6 +35,7 @@ import ( "cmd/internal/objabi" "cmd/internal/src" "cmd/internal/sys" + "internal/buildcfg" "log" "math" ) @@ -107,55 +108,35 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.From.Reg = REG_R1 p.Reg = REG_R2 } else { - // Such a large stack we need to protect against wraparound - // if SP is close to zero. - // SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall) - // The +StackGuard on both sides is required to keep the left side positive: - // SP is allowed to be slightly below stackguard. See stack.h. - // CMP $StackPreempt, R1 - // BEQ label_of_call_to_morestack - // ADD $StackGuard, SP, R2 - // SUB R1, R2 - // MOV $(framesize+(StackGuard-StackSmall)), R3 - // CMP R3, R2 - p = obj.Appendp(p, c.newprog) + // Such a large stack we need to protect against underflow. + // The runtime guarantees SP > objabi.StackBig, but + // framesize is large enough that SP-framesize may + // underflow, causing a direct comparison with the + // stack guard to incorrectly succeed. We explicitly + // guard against underflow. + // + // SUBS $(framesize-StackSmall), SP, R2 + // // On underflow, jump to morestack + // BLO label_of_call_to_morestack + // CMP stackguard, R2 - p.As = ACMP + p = obj.Appendp(p, c.newprog) + p.As = ASUBS p.From.Type = obj.TYPE_CONST - p.From.Offset = objabi.StackPreempt - p.Reg = REG_R1 - - p = obj.Appendp(p, c.newprog) - q = p - p.As = ABEQ - p.To.Type = obj.TYPE_BRANCH - - p = obj.Appendp(p, c.newprog) - p.As = AADD - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(objabi.StackGuard) + p.From.Offset = int64(framesize) - objabi.StackSmall p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p = obj.Appendp(p, c.newprog) - p.As = ASUB - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R1 - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R2 - - p = obj.Appendp(p, c.newprog) - p.As = AMOVD - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall) - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R3 + q = p + p.As = ABLO + p.To.Type = obj.TYPE_BRANCH p = obj.Appendp(p, c.newprog) p.As = ACMP p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R3 + p.From.Reg = REG_R1 p.Reg = REG_R2 } @@ -314,13 +295,13 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { } } - // For 32-bit logical instruction with constant, - // rewrite the high 32-bit to be a repetition of - // the low 32-bit, so that the BITCON test can be - // shared for both 32-bit and 64-bit. 32-bit ops - // will zero the high 32-bit of the destination - // register anyway. 
- if isANDWop(p.As) && p.From.Type == obj.TYPE_CONST { + // For 32-bit instruction with constant, rewrite + // the high 32-bit to be a repetition of the low + // 32-bit, so that the BITCON test can be shared + // for both 32-bit and 64-bit. 32-bit ops will + // zero the high 32-bit of the destination register + // anyway. + if (isANDWop(p.As) || isADDWop(p.As) || p.As == AMOVW) && p.From.Type == obj.TYPE_CONST { v := p.From.Offset & 0xffffffff p.From.Offset = v | v<<32 } @@ -539,6 +520,12 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } } + if p.Mark&LEAF != 0 && c.autosize < objabi.StackSmall { + // A leaf function with a small stack can be marked + // NOSPLIT, avoiding a stack check. + p.From.Sym.Set(obj.AttrNoSplit, true) + } + if !p.From.Sym.NoSplit() { p = c.stacksplit(p, c.autosize) // emit split check } @@ -590,7 +577,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q1.To.Reg = REGSP q1.Spadj = c.autosize - if objabi.GOOS == "ios" { + if buildcfg.GOOS == "ios" { // iOS does not support SA_ONSTACK. We will run the signal handler // on the G stack. If we write below SP, it may be clobbered by // the signal handler. So we save LR after decrementing SP. diff --git a/src/cmd/internal/obj/data.go b/src/cmd/internal/obj/data.go index f32e07acfed..bcba53c3a4b 100644 --- a/src/cmd/internal/obj/data.go +++ b/src/cmd/internal/obj/data.go @@ -135,6 +135,13 @@ func (s *LSym) WriteAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64) s.writeAddr(ctxt, off, siz, rsym, roff, objabi.R_ADDR) } +// WriteWeakAddr writes an address of size siz into s at offset off. +// rsym and roff specify the relocation for the address. +// This is a weak reference. +func (s *LSym) WriteWeakAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64) { + s.writeAddr(ctxt, off, siz, rsym, roff, objabi.R_WEAKADDR) +} + // WriteCURelativeAddr writes a pointer-sized address into s at offset off. // rsym and roff specify the relocation for the address which will be // resolved by the linker to an offset from the DW_AT_low_pc attribute of diff --git a/src/cmd/internal/obj/dwarf.go b/src/cmd/internal/obj/dwarf.go index 87c62e29816..6dd53ffd121 100644 --- a/src/cmd/internal/obj/dwarf.go +++ b/src/cmd/internal/obj/dwarf.go @@ -402,6 +402,31 @@ func (ctxt *Link) DwarfIntConst(myimportpath, name, typename string, val int64) dwarf.PutIntConst(dwCtxt{ctxt}, s, ctxt.Lookup(dwarf.InfoPrefix+typename), myimportpath+"."+name, val) } +// DwarfGlobal creates a link symbol containing a DWARF entry for +// a global variable. +func (ctxt *Link) DwarfGlobal(myimportpath, typename string, varSym *LSym) { + if myimportpath == "" || varSym.Local() { + return + } + var varname string + if varSym.Pkg == "_" { + // The frontend uses package "_" to mark symbols that should not + // be referenced by index, e.g. linkname'd symbols. + varname = varSym.Name + } else { + // Convert "". into a fully qualified package.sym name. 
+ varname = objabi.PathToPrefix(myimportpath) + varSym.Name[len(`""`):] + } + dieSymName := dwarf.InfoPrefix + varname + dieSym := ctxt.LookupInit(dieSymName, func(s *LSym) { + s.Type = objabi.SDWARFVAR + s.Set(AttrDuplicateOK, true) // needed for shared linkage + ctxt.Data = append(ctxt.Data, s) + }) + typeSym := ctxt.Lookup(dwarf.InfoPrefix + typename) + dwarf.PutGlobal(dwCtxt{ctxt}, dieSym, typeSym, varSym, varname) +} + func (ctxt *Link) DwarfAbstractFunc(curfn interface{}, s *LSym, myimportpath string) { absfn := ctxt.DwFixups.AbsFuncDwarfSym(s) if absfn.Size != 0 { diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index a48db3bdc87..28626e6e037 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -360,7 +360,21 @@ func (p *Prog) SetFrom3(a Addr) { p.RestArgs = []AddrPos{{a, Source}} } -// SetTo2 assings []Args{{a, 1}} to p.RestArgs when the second destination +// SetFrom3Reg calls p.SetFrom3 with a register Addr containing reg. +// +// Deprecated: for the same reasons as Prog.GetFrom3. +func (p *Prog) SetFrom3Reg(reg int16) { + p.SetFrom3(Addr{Type: TYPE_REG, Reg: reg}) +} + +// SetFrom3Const calls p.SetFrom3 with a const Addr containing x. +// +// Deprecated: for the same reasons as Prog.GetFrom3. +func (p *Prog) SetFrom3Const(off int64) { + p.SetFrom3(Addr{Type: TYPE_CONST, Offset: off}) +} + +// SetTo2 assigns []Args{{a, 1}} to p.RestArgs when the second destination operand does not fit into prog.RegTo2. func (p *Prog) SetTo2(a Addr) { p.RestArgs = []AddrPos{{a, Destination}} @@ -459,6 +473,7 @@ type FuncInfo struct { Autot map[*LSym]struct{} Pcln Pcln InlMarks []InlMark + spills []RegSpill dwarfInfoSym *LSym dwarfLocSym *LSym @@ -470,6 +485,7 @@ type FuncInfo struct { GCLocals *LSym StackObjects *LSym OpenCodedDeferInfo *LSym + ArgInfo *LSym // argument info for traceback FuncInfoSym *LSym } @@ -538,6 +554,11 @@ func (fi *FuncInfo) AddInlMark(p *Prog, id int32) { fi.InlMarks = append(fi.InlMarks, InlMark{p: p, id: id}) } +// AddSpill appends a spill record to the list for FuncInfo fi +func (fi *FuncInfo) AddSpill(s RegSpill) { + fi.spills = append(fi.spills, s) +} + // Record the type symbol for an auto variable so that the linker // an emit DWARF type information for the type. func (fi *FuncInfo) RecordAutoType(gotype *LSym) { @@ -583,6 +604,48 @@ func ParseABI(abistr string) (ABI, bool) { } } +// ABISet is a bit set of ABI values. +type ABISet uint8 + +const ( + // ABISetCallable is the set of all ABIs any function could + // potentially be called using. + ABISetCallable ABISet = (1 << ABI0) | (1 << ABIInternal) +) + +// Ensure ABISet is big enough to hold all ABIs. +var _ ABISet = 1 << (ABICount - 1) + +func ABISetOf(abi ABI) ABISet { + return 1 << abi +} + +func (a *ABISet) Set(abi ABI, value bool) { + if value { + *a |= 1 << abi + } else { + *a &^= 1 << abi + } +} + +func (a *ABISet) Get(abi ABI) bool { + return (*a>>abi)&1 != 0 +} + +func (a ABISet) String() string { + s := "{" + for i := ABI(0); a != 0; i++ { + if a&(1<<i) != 0 { + if s != "{" { + s += "," + } + s += i.String() + a &^= 1 << i + } + } + return s + "}" +} + diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go --- a/src/cmd/internal/obj/mips/obj0.go +++ b/src/cmd/internal/obj/mips/obj0.go @@ ... @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { + offset := int64(framesize) - objabi.StackSmall + if framesize > objabi.StackBig { + // Such a large stack we need to protect against underflow. + // The runtime guarantees SP > objabi.StackBig, but + // framesize is large enough that SP-framesize may + // underflow, causing a direct comparison with the + // stack guard to incorrectly succeed. We explicitly + // guard against underflow.
+ // + // SGTU $(framesize-StackSmall), SP, R2 + // BNE R2, label-of-call-to-morestack + + p = obj.Appendp(p, c.newprog) + p.As = ASGTU + p.From.Type = obj.TYPE_CONST + p.From.Offset = offset + p.Reg = REGSP + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R2 + + p = obj.Appendp(p, c.newprog) + q = p + p.As = ABNE + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_R2 + p.To.Type = obj.TYPE_BRANCH + p.Mark |= BRANCH + } + + // Check against the stack guard. We've ensured this won't underflow. // ADD $-(framesize-StackSmall), SP, R2 // SGTU R2, stackguard, R1 p = obj.Appendp(p, c.newprog) p.As = add p.From.Type = obj.TYPE_CONST - p.From.Offset = -(int64(framesize) - objabi.StackSmall) + p.From.Offset = -offset p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 - p = obj.Appendp(p, c.newprog) - p.As = ASGTU - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R2 - p.Reg = REG_R1 - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R1 - } else { - // Such a large stack we need to protect against wraparound. - // If SP is close to zero: - // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) - // The +StackGuard on both sides is required to keep the left side positive: - // SP is allowed to be slightly below stackguard. See stack.h. - // - // Preemption sets stackguard to StackPreempt, a very large value. - // That breaks the math above, so we have to check for that explicitly. - // // stackguard is R1 - // MOV $StackPreempt, R2 - // BEQ R1, R2, label-of-call-to-morestack - // ADD $StackGuard, SP, R2 - // SUB R1, R2 - // MOV $(framesize+(StackGuard-StackSmall)), R1 - // SGTU R2, R1, R1 - p = obj.Appendp(p, c.newprog) - - p.As = mov - p.From.Type = obj.TYPE_CONST - p.From.Offset = objabi.StackPreempt - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R2 - - p = obj.Appendp(p, c.newprog) - q = p - p.As = ABEQ - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R1 - p.Reg = REG_R2 - p.To.Type = obj.TYPE_BRANCH - p.Mark |= BRANCH - - p = obj.Appendp(p, c.newprog) - p.As = add - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(objabi.StackGuard) - p.Reg = REGSP - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R2 - - p = obj.Appendp(p, c.newprog) - p.As = sub - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R1 - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R2 - - p = obj.Appendp(p, c.newprog) - p.As = mov - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R1 - p = obj.Appendp(p, c.newprog) p.As = ASGTU p.From.Type = obj.TYPE_REG diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index 85f0570e5df..24fb5a19dec 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -383,7 +383,7 @@ func (w *writer) Sym(s *LSym) { func (w *writer) Hash64(s *LSym) { if !s.ContentAddressable() || len(s.R) != 0 { - panic("Hash of non-content-addresable symbol") + panic("Hash of non-content-addressable symbol") } b := contentHash64(s) w.Bytes(b[:]) @@ -391,7 +391,7 @@ func (w *writer) Hash64(s *LSym) { func (w *writer) Hash(s *LSym) { if !s.ContentAddressable() { - panic("Hash of non-content-addresable symbol") + panic("Hash of non-content-addressable symbol") } b := w.contentHash(s) w.Bytes(b[:]) @@ -498,7 +498,7 @@ func (w *writer) Reloc(r *Reloc) { var o goobj.Reloc o.SetOff(r.Off) o.SetSiz(r.Siz) - o.SetType(uint8(r.Type)) + o.SetType(uint16(r.Type)) o.SetAdd(r.Add) o.SetSym(makeSymRef(r.Sym)) o.Write(w.Writer) diff --git a/src/cmd/internal/obj/pcln.go 
b/src/cmd/internal/obj/pcln.go index 67c4f9a62bd..7af81335fb1 100644 --- a/src/cmd/internal/obj/pcln.go +++ b/src/cmd/internal/obj/pcln.go @@ -37,7 +37,7 @@ func funcpctab(ctxt *Link, func_ *LSym, desc string, valfunc func(*Link, *LSym, oldval := val fn := func_.Func() if fn.Text == nil { - // Return the emtpy symbol we've built so far. + // Return the empty symbol we've built so far. return sym } diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go index 177083261c1..6beb4dd94cf 100644 --- a/src/cmd/internal/obj/plist.go +++ b/src/cmd/internal/obj/plist.go @@ -75,38 +75,60 @@ func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc, myimportpath string newprog = ctxt.NewProg } - // Add reference to Go arguments for C or assembly functions without them. - for _, s := range text { - if !strings.HasPrefix(s.Name, "\"\".") { - continue - } - if s.ABIWrapper() { - // Don't create an args_stackmap symbol reference for an ABI - // wrapper function - continue - } - found := false - for p := s.Func().Text; p != nil; p = p.Link { - if p.As == AFUNCDATA && p.From.Type == TYPE_CONST && p.From.Offset == objabi.FUNCDATA_ArgsPointerMaps { - found = true - break + // Add reference to Go arguments for assembly functions without them. + if ctxt.IsAsm { + for _, s := range text { + if !strings.HasPrefix(s.Name, "\"\".") { + continue + } + // The current args_stackmap generation in the compiler assumes + // that the function in question is ABI0, so avoid introducing + // an args_stackmap reference if the func is not ABI0 (better to + // have no stackmap than an incorrect/lying stackmap). + if s.ABI() != ABI0 { + continue + } + foundArgMap, foundArgInfo := false, false + for p := s.Func().Text; p != nil; p = p.Link { + if p.As == AFUNCDATA && p.From.Type == TYPE_CONST { + if p.From.Offset == objabi.FUNCDATA_ArgsPointerMaps { + foundArgMap = true + } + if p.From.Offset == objabi.FUNCDATA_ArgInfo { + foundArgInfo = true + } + if foundArgMap && foundArgInfo { + break + } + } + } + if !foundArgMap { + p := Appendp(s.Func().Text, newprog) + p.As = AFUNCDATA + p.From.Type = TYPE_CONST + p.From.Offset = objabi.FUNCDATA_ArgsPointerMaps + p.To.Type = TYPE_MEM + p.To.Name = NAME_EXTERN + p.To.Sym = ctxt.LookupDerived(s, s.Name+".args_stackmap") + } + if !foundArgInfo { + p := Appendp(s.Func().Text, newprog) + p.As = AFUNCDATA + p.From.Type = TYPE_CONST + p.From.Offset = objabi.FUNCDATA_ArgInfo + p.To.Type = TYPE_MEM + p.To.Name = NAME_EXTERN + p.To.Sym = ctxt.LookupDerived(s, fmt.Sprintf("%s.arginfo%d", s.Name, s.ABI())) } - } - - if !found { - p := Appendp(s.Func().Text, newprog) - p.As = AFUNCDATA - p.From.Type = TYPE_CONST - p.From.Offset = objabi.FUNCDATA_ArgsPointerMaps - p.To.Type = TYPE_MEM - p.To.Name = NAME_EXTERN - p.To.Sym = ctxt.LookupDerived(s, s.Name+".args_stackmap") } } // Turn functions into machine code images. 
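As an aside on the args_stackmap/arginfo logic above: a minimal, runnable Go sketch of how the two derived FUNCDATA symbol names are formed. The function name `"".add` and the ABI value 0 (ABI0) are hypothetical examples, not taken from this CL:

	package main

	import "fmt"

	func main() {
		// Hypothetical hand-written ABI0 assembly function "".add; Flushplist
		// (above) references these two derived symbols for it.
		name := `"".add`
		fmt.Println(name + ".args_stackmap")  // prints: "".add.args_stackmap
		fmt.Printf("%s.arginfo%d\n", name, 0) // prints: "".add.arginfo0 (0 = ABI0)
	}
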
for _, s := range text { mkfwd(s) + if ctxt.Arch.ErrorCheck != nil { + ctxt.Arch.ErrorCheck(ctxt, s) + } linkpatch(ctxt, s, newprog) ctxt.Arch.Preprocess(ctxt, s, newprog) ctxt.Arch.Assemble(ctxt, s, newprog) @@ -133,7 +155,7 @@ func (ctxt *Link) InitTextSym(s *LSym, flag int) { ctxt.Diag("symbol %s listed multiple times", s.Name) } name := strings.Replace(s.Name, "\"\"", ctxt.Pkgpath, -1) - s.Func().FuncID = objabi.GetFuncID(name, flag&WRAPPER != 0) + s.Func().FuncID = objabi.GetFuncID(name, flag&WRAPPER != 0 || flag&ABIWRAPPER != 0) s.Func().FuncFlag = toFuncFlag(flag) s.Set(AttrOnList, true) s.Set(AttrDuplicateOK, flag&DUPOK != 0) diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go index 4c97302f837..428cac528ac 100644 --- a/src/cmd/internal/obj/ppc64/a.out.go +++ b/src/cmd/internal/obj/ppc64/a.out.go @@ -368,30 +368,21 @@ const ( C_LCON /* other 32 */ C_DCON /* other 64 (could subdivide further) */ C_SACON /* $n(REG) where n <= int16 */ - C_SECON - C_LACON /* $n(REG) where int16 < n <= int32 */ - C_LECON - C_DACON /* $n(REG) where int32 < n */ + C_LACON /* $n(REG) where int16 < n <= int32 */ + C_DACON /* $n(REG) where int32 < n */ C_SBRA C_LBRA C_LBRAPIC - C_SAUTO - C_LAUTO - C_SEXT - C_LEXT C_ZOREG // conjecture: either (1) register + zeroed offset, or (2) "R0" implies zero or C_REG - C_SOREG // register + signed offset - C_LOREG + C_SOREG // D/DS form memory operation + C_LOREG // 32 bit addis + D/DS-form memory operation C_FPSCR - C_MSR C_XER C_LR C_CTR C_ANY C_GOK C_ADDR - C_GOTADDR - C_TOCADDR C_TLS_LE C_TLS_IE C_TEXTSIZE diff --git a/src/cmd/internal/obj/ppc64/anames9.go b/src/cmd/internal/obj/ppc64/anames9.go index 4699a15d3be..b2632aa9ed0 100644 --- a/src/cmd/internal/obj/ppc64/anames9.go +++ b/src/cmd/internal/obj/ppc64/anames9.go @@ -20,30 +20,21 @@ var cnames9 = []string{ "LCON", "DCON", "SACON", - "SECON", "LACON", - "LECON", "DACON", "SBRA", "LBRA", "LBRAPIC", - "SAUTO", - "LAUTO", - "SEXT", - "LEXT", "ZOREG", "SOREG", "LOREG", "FPSCR", - "MSR", "XER", "LR", "CTR", "ANY", "GOK", "ADDR", - "GOTADDR", - "TOCADDR", "TLS_LE", "TLS_IE", "TEXTSIZE", diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 41e263b2c07..316959f62d7 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -64,567 +64,464 @@ const ( type Optab struct { as obj.As // Opcode - a1 uint8 - a2 uint8 - a3 uint8 - a4 uint8 - type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r - size int8 - param int16 + a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog. + a2 uint8 // p.Reg argument (int16 Register) + a3 uint8 // p.RestArgs[0] (obj.AddrPos) + a4 uint8 // p.RestArgs[1] + a5 uint8 // p.RestArgs[2] + a6 uint8 // p.To (obj.Addr) + type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r + size int8 // Text space in bytes to lay out the operation } -// This optab contains a list of opcodes with the operand -// combinations that are implemented. Not all opcodes are in this -// table, but are added later in buildop by calling opset for those -// opcodes which allow the same operand combinations as an opcode -// already in the table. +// optab contains an array of accepted operand combinations, sliced per +// instruction. Unused arguments and fields are not explicitly enumerated, +// and are omitted for clarity. Unused arguments and values should always +// assume the default value for the given type.
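+ // For example, the entry {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, + // type_: 2, size: 4} below says ADD accepts two register sources and a + // register destination, and is assembled by case 2 of asmout into a + // single 4-byte instruction word.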
// -// The type field in the Optabl identifies the case in asmout where -// the instruction word is assembled. +// optab does not list every valid ppc64 opcode; it enumerates representative +// operand combinations for a class of instruction. The variable oprange indexes +// all valid ppc64 opcodes. +// +// oprange is initialized to point to a slice within optab which contains the valid +// operand combinations for a given instruction. This is initialized from buildop. +// +// Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface +// to arrange entries to minimize text size of each opcode. var optab = []Optab{ - {obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0}, - {obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0}, - {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0}, - {obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0}, + {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0}, + {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, + {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0}, + {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, /* move register */ - {AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0}, - {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0}, - {AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0}, - {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0}, - {AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, - {AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0}, - {AADD, C_SCON, C_REG, C_NONE, C_REG, 4, 4, 0}, - {AADD, C_SCON, C_NONE, C_NONE, C_REG, 4, 4, 0}, - {AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0}, - {AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0}, - {AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0}, - {AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0}, - {AADD, C_ANDCON, C_REG, C_NONE, C_REG, 22, 8, 0}, - {AADD, C_ANDCON, C_NONE, C_NONE, C_REG, 22, 8, 0}, - {AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0}, - {AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0}, - {AADDIS, C_ADDCON, C_REG, C_NONE, C_REG, 20, 4, 0}, - {AADDIS, C_ADDCON, C_NONE, C_NONE, C_REG, 20, 4, 0}, - {AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, - {AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0}, - {AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0}, - {AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0}, - {AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0}, - {AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0}, - {AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */ - {AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, - {AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, - {AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, - {AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0}, - {AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0}, - {AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0}, - {AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0}, - {AANDCC, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0}, - {AANDCC, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0}, - {AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0}, - {AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0}, - {AANDISCC, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0}, - {AANDISCC, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0}, - {AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, - {AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0}, - {AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0}, - {AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0}, - {AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0}, - {AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4,
0}, - {AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0}, - {AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0}, - {ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, - {ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0}, - {ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0}, - {ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0}, - {AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */ - {AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, - {AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0}, - {AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0}, - {AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0}, - {AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0}, - {AOR, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0}, - {AOR, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0}, - {AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0}, - {AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0}, - {AORIS, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0}, - {AORIS, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0}, - {ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */ - {ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0}, - {ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */ - {ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0}, - {ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, - {ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, - {ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, - {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, - {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0}, - {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0}, - {AEXTSWSLI, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0}, - {AEXTSWSLI, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0}, - {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0}, - {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0}, - {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, - {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, - {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0}, - {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0}, - {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, - {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, - {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0}, - {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0}, - {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0}, - {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0}, - {ACLRLSLWI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0}, - {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0}, - {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0}, - {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0}, - {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0}, - {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0}, - {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0}, - {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0}, - {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0}, - {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0}, - {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0}, - {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0}, - {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0}, - {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0}, - {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0}, - {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0}, + {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, + {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4}, + {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, + {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4}, + {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, + {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, + {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4}, + {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4}, 
+ {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8}, + {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8}, + {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, + {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, + {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4}, + {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4}, + {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, + {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4}, + {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, + {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, + {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, + {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, + {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */ + {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, + {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, + {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4}, + {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, + {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, + {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, + {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, + {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, + {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4}, + {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, + {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, + {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, + {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, + {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, + {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, + {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4}, + {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, + {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, + {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, + {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4}, + {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4}, + {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12}, + {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */ + {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, + {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, + {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4}, + {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, + {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, + {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, + {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, + {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, + {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4}, + {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, + {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */ + {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, + {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */ + {as: ASUB, a1: C_REG, a6: C_REG, type_: 
10, size: 4}, + {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, + {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, + {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, + {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, + {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4}, + {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4}, + {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, + {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, + {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, + {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, + {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, + {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4}, + {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, + {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4}, + {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, + {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4}, + {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, + {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, + {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4}, + {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4}, + {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, + {as: AFABS, a6: C_FREG, type_: 33, size: 4}, + {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4}, + {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4}, + {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4}, - /* store, short offset */ - {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, - {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, - {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, - {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, - {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, - {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, - {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, - {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, - {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, - {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, - {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, - {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, - {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, - {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, - {AMOVB, C_REG, C_NONE, C_NONE, 
C_SAUTO, 7, 4, REGSP}, - {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, - {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, - {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, - {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, - {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, - {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, + {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, - /* load, short offset */ - {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO}, - {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO}, - {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB}, - {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB}, - {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB}, - {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB}, - {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB}, - {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP}, - {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP}, - {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP}, - {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP}, - {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP}, - {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, - {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO}, - {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO}, + {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, - /* store, long offset */ - {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, - {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, - {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, - {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, - {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, - {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, - {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, - {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, - {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, - {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, - {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, - {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, - {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, - {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, - {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, - {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, + {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4}, + {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4}, - /* load, long offset */ - {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB}, - {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB}, - {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB}, - 
{AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB}, - {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB}, - {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP}, - {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP}, - {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP}, - {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP}, - {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP}, - {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO}, - {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO}, - {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO}, - {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO}, - {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO}, - {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0}, - {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0}, - {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0}, - {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0}, - {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0}, + {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, + {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, + {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, + {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, + {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, + {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4}, - {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0}, - {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0}, + {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, + {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, + {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, + {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, + {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, + {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4}, - {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0}, - {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 95, 8, 0}, + {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, + {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, + {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, + {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, + {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, + {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, + {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, + {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, + {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, + {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, + {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, + {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4}, - /* load constant */ - {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, - {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP}, - {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB}, - {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP}, - {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, - {AMOVD, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, - {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */ - {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP}, - {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB}, - 
{AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP}, - {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, - {AMOVW, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, - {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */ - {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP}, - {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB}, - {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP}, - {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, - {AMOVWZ, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, + {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, + {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, + {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, + {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4}, + {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, + {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, + {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, + {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, + {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4}, - /* load unsigned/long constants (TO DO: check) */ - {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, - {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0}, - {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, - {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0}, - {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, - {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0}, - {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0}, - {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, - {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0}, - {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0}, - {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0}, - {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0}, - {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0}, - {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0}, - {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0}, - {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, - {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0}, - {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0}, - {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0}, - {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0}, - {ABR, C_NONE, C_NONE, C_SCON, C_LR, 18, 4, 0}, - {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0}, - {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0}, - {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0}, - {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0}, - {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0}, - {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0}, - {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0}, - {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0}, - {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB}, - {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP}, - {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO}, - {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB}, - {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP}, - {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO}, - {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0}, - {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 
24, 8, 0}, - {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0}, - {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, - {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, - {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, - {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, - {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, - {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, - {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, - {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0}, - {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0}, - {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0}, - {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0}, - {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0}, - {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0}, - {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0}, - {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0}, - {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0}, - {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0}, - {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0}, - {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0}, - {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0}, - {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0}, - {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0}, - {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0}, - {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0}, - {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0}, - {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0}, - {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0}, - {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0}, - {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0}, - {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0}, - {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0}, - {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0}, - {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0}, - {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0}, - {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0}, - {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */ - {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */ - {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */ + {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8}, + {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4}, + {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, + {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4}, + {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, + {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, + {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4}, + {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, + {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, + {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4}, + {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4}, + + {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4}, + + {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4}, + {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4}, + {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4}, + {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4}, + {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4}, + {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4}, + {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, + {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4}, + + {as: ASYSCALL, type_: 5, size: 4}, + {as: ASYSCALL, a1: C_REG, type_: 77, size: 12}, + {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12}, + {as: ABEQ, a6: C_SBRA, type_: 16, 
size: 4}, + {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4}, + {as: ABR, a6: C_LBRA, type_: 11, size: 4}, + {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, + {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4}, + {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4}, + {as: ABR, a6: C_LR, type_: 18, size: 4}, + {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, + {as: ABR, a6: C_CTR, type_: 18, size: 4}, + {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4}, + {as: ABR, a6: C_ZOREG, type_: 15, size: 8}, + {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4}, + {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4}, + {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4}, + {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4}, + {as: ABC, a6: C_ZOREG, type_: 15, size: 8}, + {as: ASYNC, type_: 46, size: 4}, + {as: AWORD, a1: C_LCON, type_: 40, size: 4}, + {as: ADWORD, a1: C_LCON, type_: 31, size: 8}, + {as: ADWORD, a1: C_DCON, type_: 31, size: 8}, + {as: ADWORD, a1: C_LACON, type_: 31, size: 8}, + {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4}, + {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4}, + {as: AEXTSB, a6: C_REG, type_: 48, size: 4}, + {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, + {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, + {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4}, + {as: ANEG, a6: C_REG, type_: 47, size: 4}, + {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12}, + {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12}, + {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16}, + {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16}, + {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12}, + {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12}, + {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4}, /* Other ISA 2.05+ instructions */ - {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */ - {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */ - {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form, ISA 3.0 */ - {ACMPEQB, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0}, - {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */ - {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */ - {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */ - {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */ - {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */ - {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */ - {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */ - {ACRAND, C_CREG, C_NONE, C_NONE, C_CREG, 2, 4, 0}, /* logical ops for condition registers xl-form */ + {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */ + {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */ + {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */ + {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4}, + {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */ + {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, 
/* floating test for sw square root, x-form */ + {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */ + {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */ + {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */ + {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */ + {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */ + {as: ACRAND, a1: C_CREG, a6: C_CREG, type_: 2, size: 4}, /* logical ops for condition registers xl-form */ /* Vector instructions */ /* Vector load */ - {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */ + {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */ /* Vector store */ - {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */ + {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */ /* Vector logical */ - {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */ - {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */ + {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */ + {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */ /* Vector add */ - {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */ - {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */ - {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */ - {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */ - {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */ + {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */ + {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */ + {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */ + {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */ + {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */ /* Vector subtract */ - {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */ - {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */ - {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */ - {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */ - {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */ + {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */ + {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */ + {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */ + {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed 
saturate, vx-form */ + {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */ /* Vector multiply */ - {AVMULESB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 9}, /* vector multiply, vx-form */ - {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */ - {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */ + {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */ + {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */ + {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */ /* Vector rotate */ - {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */ + {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */ /* Vector shift */ - {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */ - {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */ - {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */ + {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */ + {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */ + {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */ /* Vector count */ - {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */ - {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */ + {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */ + {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */ /* Vector compare */ - {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */ - {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */ - {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */ + {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */ + {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */ + {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */ /* Vector merge */ - {AVMRGOW, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector merge odd word, vx-form */ + {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */ /* Vector permute */ - {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */ + {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */ /* Vector bit permute */ - {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */ + {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */ /* Vector select */ - {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */ + {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form 
*/ /* Vector splat */ - {AVSPLTB, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */ - {AVSPLTB, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, - {AVSPLTISB, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */ - {AVSPLTISB, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, + {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */ + {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, + {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */ + {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4}, /* Vector AES */ - {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */ - {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */ - {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */ + {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */ + {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */ + {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */ /* Vector SHA */ - {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */ + {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */ /* VSX vector load */ - {ALXVD2X, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */ - {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 96, 4, 0}, /* vsx vector load, dq-form */ - {ALXVL, C_REG, C_REG, C_NONE, C_VSREG, 98, 4, 0}, /* vsx vector load length */ + {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */ + {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */ + {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */ /* VSX vector store */ - {ASTXVD2X, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */ - {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 97, 4, 0}, /* vsx vector store, dq-form */ - {ASTXVL, C_VSREG, C_REG, C_NONE, C_REG, 99, 4, 0}, /* vsx vector store with length x-form */ + {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */ + {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */ + {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */ /* VSX scalar load */ - {ALXSDX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */ + {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */ /* VSX scalar store */ - {ASTXSDX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */ + {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */ /* VSX scalar as integer load */ - {ALXSIWAX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */ + {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */ /* VSX scalar store as integer */ - {ASTXSIWX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */ + {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */ 
/* VSX move from VSR */ - {AMFVSRD, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */ - {AMFVSRD, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, - {AMFVSRD, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, + {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, /* vsx move from vsr, xx1-form */ + {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4}, + {as: AMFVSRD, a1: C_VREG, a6: C_REG, type_: 88, size: 4}, /* VSX move to VSR */ - {AMTVSRD, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */ - {AMTVSRD, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0}, - {AMTVSRD, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0}, - {AMTVSRD, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0}, + {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 88, size: 4}, /* vsx move to vsr, xx1-form */ + {as: AMTVSRD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 88, size: 4}, + {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 88, size: 4}, + {as: AMTVSRD, a1: C_REG, a6: C_VREG, type_: 88, size: 4}, /* VSX logical */ - {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */ - {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */ + {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */ + {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */ /* VSX select */ - {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */ + {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */ /* VSX merge */ - {AXXMRGHW, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */ + {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */ /* VSX splat */ - {AXXSPLTW, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */ - {AXXSPLTIB, C_SCON, C_NONE, C_NONE, C_VSREG, 100, 4, 0}, /* vsx splat, xx2-form */ + {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */ + {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */ /* VSX permute */ - {AXXPERM, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */ + {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */ /* VSX shift */ - {AXXSLDWI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */ + {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */ /* VSX reverse bytes */ - {AXXBRQ, C_VSREG, C_NONE, C_NONE, C_VSREG, 101, 4, 0}, /* vsx reverse bytes */ + {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */ /* VSX scalar FP-FP conversion */ - {AXSCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */ + {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */ /* VSX vector FP-FP conversion */ - {AXVCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */ + {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */ /* VSX scalar FP-integer conversion */ - {AXSCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */ + {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, 
xx2-form */ /* VSX scalar integer-FP conversion */ - {AXSCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */ + {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */ /* VSX vector FP-integer conversion */ - {AXVCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */ + {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */ /* VSX vector integer-FP conversion */ - {AXVCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */ + {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */ - /* 64-bit special registers */ - {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0}, - {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0}, - {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0}, - {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0}, - {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0}, - {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0}, - {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0}, - {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0}, + {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4}, + {as: ACMP, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4}, + {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4}, + {as: ACMP, a1: C_REG, a2: C_REG, a6: C_ADDCON, type_: 71, size: 4}, + {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4}, + {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4}, + {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4}, + {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_ANDCON, type_: 71, size: 4}, + {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4}, + {as: AFCMPO, a1: C_FREG, a2: C_REG, a6: C_FREG, type_: 70, size: 4}, + {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4}, + {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4}, + {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4}, + {as: ADCBF, a1: C_SOREG, type_: 43, size: 4}, + {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4}, + {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4}, + {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4}, + {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4}, + {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4}, + {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4}, + {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4}, + {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4}, + {as: AEIEIO, type_: 46, size: 4}, + {as: ATLBIE, a1: C_REG, type_: 49, size: 4}, + {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4}, + {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, + {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, + {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4}, + {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4}, + {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4}, + {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4}, + {as: obj.AUNDEF, type_: 78, size: 4}, + {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0}, + {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0}, + {as: obj.ANOP, type_: 0, size: 0}, + {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689 + {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior + {as: obj.ANOP, 
a1: C_FREG, type_: 0, size: 0}, + {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL + {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL + {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code - /* 32-bit special registers (gloss over sign-extension or not?) */ - {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0}, - {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0}, - {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0}, - {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0}, - {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0}, - {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0}, - {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0}, - {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0}, - {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0}, - {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0}, - {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0}, - {AMOVFL, C_REG, C_NONE, C_NONE, C_LCON, 69, 4, 0}, - {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0}, - {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0}, - {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0}, - {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0}, - {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0}, - {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0}, - {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0}, - {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0}, - {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0}, - {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0}, - {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0}, - {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0}, - {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0}, - {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0}, - {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0}, - {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0}, - {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0}, - {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0}, - {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0}, - {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0}, - {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0}, - {AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0}, - {AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, - {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, - {ALDAR, C_ZOREG, C_NONE, C_ANDCON, C_REG, 45, 4, 0}, - {AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0}, - {ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0}, - {ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0}, - {ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0}, - {ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0}, - {ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0}, - {ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0}, - {ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, - {ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0}, - {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0}, - {obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0}, - {obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0}, - {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0}, - {obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // NOP operand variations added for #40689 - {obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // to preserve previous behavior - {obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, 0, 0, 0}, - {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL - {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL - {obj.APCALIGN, C_LCON, C_NONE, 
C_NONE, C_NONE, 0, 0, 0}, // align code
-
-	{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
+	{as: obj.AXXX, type_: 0, size: 4},
 }
 
 var oprange [ALAST & obj.AMask][]Optab
@@ -674,6 +571,30 @@ func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
 	return 0
 }
 
+// Get the implied register of an operand which doesn't specify one. These show up
+// in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
+// or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is
+// when generating constants in a register like "MOVD $constant, Rx".
+func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
+	switch oclass(a) {
+	case C_ADDCON, C_ANDCON, C_UCON, C_LCON, C_SCON, C_ZCON:
+		return REGZERO
+	case C_SACON, C_LACON:
+		return REGSP
+	case C_LOREG, C_SOREG, C_ZOREG:
+		switch a.Name {
+		case obj.NAME_EXTERN, obj.NAME_STATIC:
+			return REGSB
+		case obj.NAME_AUTO, obj.NAME_PARAM:
+			return REGSP
+		case obj.NAME_NONE:
+			return REGZERO
+		}
+	}
+	c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
+	return 0
+}
+
 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	p := cursym.Func().Text
 	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
@@ -864,50 +785,42 @@ func (c *ctxt9) aclass(a *obj.Addr) int {
 		if a.Reg == REG_FPSCR {
 			return C_FPSCR
 		}
-		if a.Reg == REG_MSR {
-			return C_MSR
-		}
 		return C_GOK
 
 	case obj.TYPE_MEM:
 		switch a.Name {
+		case obj.NAME_GOTREF, obj.NAME_TOCREF:
+			return C_ADDR
+
+		case obj.NAME_EXTERN, obj.NAME_STATIC:
+			c.instoffset = a.Offset
 			if a.Sym == nil {
 				break
-			}
-			c.instoffset = a.Offset
-			if a.Sym != nil { // use relocation
-				if a.Sym.Type == objabi.STLSBSS {
-					if c.ctxt.Flag_shared {
-						return C_TLS_IE
-					} else {
-						return C_TLS_LE
-					}
+			} else if a.Sym.Type == objabi.STLSBSS {
+				// For PIC builds, use 12-byte GOT initial-exec TLS accesses.
+				if c.ctxt.Flag_shared {
+					return C_TLS_IE
 				}
+				// Otherwise, use 8-byte local-exec TLS accesses.
+				return C_TLS_LE
+			} else {
 				return C_ADDR
 			}
-			return C_LEXT
-
-		case obj.NAME_GOTREF:
-			return C_GOTADDR
-
-		case obj.NAME_TOCREF:
-			return C_TOCADDR
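The getimpliedreg helper above replaces the old per-entry param column in optab: the asmout cases later in this patch call it instead of reading o.param. As a rough standalone model of the mapping it performs (the class and register names here are stand-ins, not the real C_*/REG* constants from this package):

	package main

	import "fmt"

	type class int

	const (
		cADDCON class = iota // small constant operand: MOVD $c, Rx
		cSACON               // constant offset from a pseudo-register: $x+8(SP)
		cSOREG               // offset addressing: x+8(SP), foo(SB), y+4(FP)
	)

	func impliedBase(c class, name string) string {
		switch c {
		case cADDCON:
			return "R0" // base register 0 reads as zero in ppc64 address arithmetic
		case cSACON:
			return "SP"
		case cSOREG:
			switch name {
			case "extern", "static":
				return "SB" // symbols resolve against the static base
			case "auto", "param":
				return "SP" // locals and arguments live on the stack
			}
		}
		return "R0"
	}

	func main() {
		fmt.Println(impliedBase(cADDCON, ""))      // MOVD $const, Rx    -> R0
		fmt.Println(impliedBase(cSOREG, "auto"))   // MOVD R5, foo+10(SP) -> SP
		fmt.Println(impliedBase(cSOREG, "extern")) // MOVD R5, foosymbol  -> SB
	}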
 
 		case obj.NAME_AUTO:
 			c.instoffset = int64(c.autosize) + a.Offset
 			if c.instoffset >= -BIG && c.instoffset < BIG {
-				return C_SAUTO
+				return C_SOREG
 			}
-			return C_LAUTO
+			return C_LOREG
 
 		case obj.NAME_PARAM:
 			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
 			if c.instoffset >= -BIG && c.instoffset < BIG {
-				return C_SAUTO
+				return C_SOREG
 			}
-			return C_LAUTO
+			return C_LOREG
 
 		case obj.NAME_NONE:
 			c.instoffset = a.Offset
@@ -958,11 +871,8 @@ func (c *ctxt9) aclass(a *obj.Addr) int {
 			if s == nil {
 				return C_GOK
 			}
-
 			c.instoffset = a.Offset
-
-			/* not sure why this barfs */
-			return C_LCON
+			return C_LACON
 
 		case obj.NAME_AUTO:
 			c.instoffset = int64(c.autosize) + a.Offset
@@ -1036,25 +946,28 @@ func (c *ctxt9) oplook(p *obj.Prog) *Optab {
 		a1 = c.aclass(&p.From) + 1
 		p.From.Class = int8(a1)
 	}
-	a1--
-	a3 := C_NONE + 1
-	if p.GetFrom3() != nil {
-		a3 = int(p.GetFrom3().Class)
-		if a3 == 0 {
-			a3 = c.aclass(p.GetFrom3()) + 1
-			p.GetFrom3().Class = int8(a3)
+
+	argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
+	for i, ap := range p.RestArgs {
+		argsv[i] = int(ap.Addr.Class)
+		if argsv[i] == 0 {
+			argsv[i] = c.aclass(&ap.Addr) + 1
+			ap.Addr.Class = int8(argsv[i])
 		}
-	}
-	a3--
-	a4 := int(p.To.Class)
-	if a4 == 0 {
-		a4 = c.aclass(&p.To) + 1
-		p.To.Class = int8(a4)
 	}
+	a3 := argsv[0] - 1
+	a4 := argsv[1] - 1
+	a5 := argsv[2] - 1
+
+	a6 := int(p.To.Class)
+	if a6 == 0 {
+		a6 = c.aclass(&p.To) + 1
+		p.To.Class = int8(a6)
+	}
+	a6--
-	a4--
 	a2 := C_NONE
 	if p.Reg != 0 {
 		if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
@@ -1068,20 +981,22 @@ func (c *ctxt9) oplook(p *obj.Prog) *Optab {
 		}
 	}
 
-	// c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4)
+	// c.ctxt.Logf("oplook %v %d %d %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
 	ops := oprange[p.As&obj.AMask]
 	c1 := &xcmp[a1]
 	c3 := &xcmp[a3]
 	c4 := &xcmp[a4]
+	c5 := &xcmp[a5]
+	c6 := &xcmp[a6]
 	for i := range ops {
 		op := &ops[i]
-		if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
+		if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
 			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
 			return op
 		}
 	}
 
-	c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
+	c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
 	prasm(p)
 	if ops == nil {
 		ops = optab
@@ -1134,13 +1049,13 @@ func cmp(a int, b int) bool {
 			return true
 		}
 
-	case C_LEXT:
-		if b == C_SEXT {
+	case C_SOREG:
+		if b == C_ZOREG {
 			return true
 		}
 
-	case C_LAUTO:
-		if b == C_SAUTO {
+	case C_LOREG:
+		if b == C_SOREG || b == C_ZOREG {
 			return true
 		}
 
@@ -1149,16 +1064,6 @@
 		return r0iszero != 0 /*TypeKind(100016)*/
 	}
 
-	case C_LOREG:
-		if b == C_ZOREG || b == C_SOREG {
-			return true
-		}
-
-	case C_SOREG:
-		if b == C_ZOREG {
-			return true
-		}
-
 	case C_ANY:
 		return true
 	}
@@ -1211,6 +1116,14 @@ func (x ocmp) Less(i, j int) bool {
 	if n != 0 {
 		return n < 0
 	}
+	n = int(p1.a5) - int(p2.a5)
+	if n != 0 {
+		return n < 0
+	}
+	n = int(p1.a6) - int(p2.a6)
+	if n != 0 {
+		return n < 0
+	}
 	return false
 }
@@ -1985,6 +1898,9 @@ func buildop(ctxt *obj.Link) {
 	case AFTSQRT:
 		opset(AFTSQRT, r0)
 
+	case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
+		opset(AMOVWZ, r0) /* Same as above, but zero extended */
+
 	case AADD,
 		AADDIS,
 		AANDCC, /* and. Rb,Rs,Ra; andi.
$uimm,Rs,Ra */ @@ -1992,9 +1908,6 @@ func buildop(ctxt *obj.Link) { AFMOVSX, AFMOVSZ, ALSW, - AMOVW, - /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */ - AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */ AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */ AMOVB, /* macro: move byte with sign extension */ AMOVBU, /* macro: move byte with sign extension & update */ @@ -2065,6 +1978,11 @@ func OPCC(o uint32, xo uint32, rc uint32) uint32 { return OPVCC(o, xo, 0, rc) } +/* Generate MD-form opcode */ +func OPMD(o, xo, rc uint32) uint32 { + return o<<26 | xo<<2 | rc&1 +} + /* the order is dest, a/s, b/imm for both arithmetic and logical operations */ func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 { return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 @@ -2220,15 +2138,12 @@ const ( OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 - OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 - OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0 - OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 @@ -2305,7 +2220,7 @@ func (c *ctxt9) opform(insn uint32) int { // Encode instructions and create relocation for accessing s+d according to the // instruction op with source or destination (as appropriate) register reg. -func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) { +func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) { if c.ctxt.Headtype == objabi.Haix { // Every symbol access must be made via a TOC anchor. c.ctxt.Diag("symbolAccess called for %s", s.Name) @@ -2317,8 +2232,15 @@ func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 } else { base = REG_R0 } - o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) - o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) + // If reg can be reused when computing the symbol address, + // use it instead of REGTMP. 
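The OPMD helper added above assembles a POWER MD-form opcode: the 6-bit primary opcode in the top bits, the 3-bit extended opcode at bit 2, and the record (Rc) bit at bit 0; the shift and mask fields are OR'd in later by the callers. A quick standalone check of the values it produces for the rotate instructions converted later in this patch:

	package main

	import "fmt"

	func OPMD(o, xo, rc uint32) uint32 {
		return o<<26 | xo<<2 | rc&1
	}

	func main() {
		fmt.Printf("rldicl  %#x\n", OPMD(30, 0, 0)) // 0x78000000, old OPVCC(30, 0, 0, 0)
		fmt.Printf("rldicr  %#x\n", OPMD(30, 1, 0)) // 0x78000004, old OPVCC(30, 0, 0, 0) | 2<<1
		fmt.Printf("rldic   %#x\n", OPMD(30, 2, 0)) // 0x78000008, old OPVCC(30, 0, 0, 0) | 4<<1
		fmt.Printf("rldimi. %#x\n", OPMD(30, 3, 1)) // 0x7800000d, with the Rc bit set
	}

The named helper is easier to audit against the ISA tables than the OPVCC(...)|n<<1 spellings it replaces.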
+ if !reuse { + o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) + o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) + } else { + o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) + o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0) + } rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 8 @@ -2455,20 +2377,6 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 0: /* pseudo ops */ break - case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */ - if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST { - v := c.regoff(&p.From) - if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 { - //nerrors--; - c.ctxt.Diag("literal operation on R0\n%v", p) - } - - o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v)) - break - } - - o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg)) - case 2: /* int/cr/fp op Rb,[Ra],Rd */ r := int(p.Reg) @@ -2483,7 +2391,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { v := int32(d) r := int(p.From.Reg) if r == 0 { - r = int(o.param) + r = c.getimpliedreg(&p.From, p) } if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) { c.ctxt.Diag("literal operation on R0\n%v", p) @@ -2556,25 +2464,13 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { r := int(p.To.Reg) if r == 0 { - r = int(o.param) + r = c.getimpliedreg(&p.To, p) } v := c.regoff(&p.To) if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 { if v != 0 { c.ctxt.Diag("illegal indexed instruction\n%v", p) } - if c.ctxt.Flag_shared && r == REG_R13 { - rel := obj.Addrel(c.cursym) - rel.Off = int32(c.pc) - rel.Siz = 4 - // This (and the matching part in the load case - // below) are the only places in the ppc64 toolchain - // that knows the name of the tls variable. Possibly - // we could add some assembly syntax so that the name - // of the variable does not have to be assumed. - rel.Sym = c.ctxt.Lookup("runtime.tls_g") - rel.Type = objabi.R_POWER_TLS - } o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r)) } else { if int32(int16(v)) != v { @@ -2588,24 +2484,17 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v)) } - case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */ + case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */ r := int(p.From.Reg) if r == 0 { - r = int(o.param) + r = c.getimpliedreg(&p.From, p) } v := c.regoff(&p.From) if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 { if v != 0 { c.ctxt.Diag("illegal indexed instruction\n%v", p) } - if c.ctxt.Flag_shared && r == REG_R13 { - rel := obj.Addrel(c.cursym) - rel.Off = int32(c.pc) - rel.Siz = 4 - rel.Sym = c.ctxt.Lookup("runtime.tls_g") - rel.Type = objabi.R_POWER_TLS - } o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) } else { if int32(int16(v)) != v { @@ -2619,21 +2508,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v)) } - case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */ - r := int(p.From.Reg) - - if r == 0 { - r = int(o.param) - } - v := c.regoff(&p.From) - if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 { - if v != 0 { - c.ctxt.Diag("illegal indexed instruction\n%v", p) - } - o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) - } else { - o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) - } + // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 
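A little further up, oplook now classifies six operand slots (a1..a6) and matches them against optab through the xcmp compatibility matrix, with cmp() declaring which classes subsume which (a C_ZOREG operand satisfies a C_SOREG slot, and both satisfy C_LOREG). A toy version of that matching, using invented classes and entries in place of the real tables:

	package main

	import "fmt"

	const (
		cNone = iota
		cZOReg // register-indirect, zero offset
		cSOReg // register-indirect, 16-bit offset
		nclass
	)

	type entry struct {
		name string
		want [2]int // operand classes this entry accepts
	}

	func main() {
		// xcmp[have][want] reports whether an operand of class have may be
		// used where an entry asks for want (the c1[op.a1] indexing above).
		var xcmp [nclass][nclass]bool
		for i := 0; i < nclass; i++ {
			xcmp[i][i] = true
		}
		xcmp[cZOReg][cSOReg] = true // a zero offset is a valid 16-bit offset

		optab := []entry{
			{"op-zoreg", [2]int{cZOReg, cZOReg}},
			{"op-soreg", [2]int{cSOReg, cSOReg}},
		}
		have := [2]int{cZOReg, cSOReg} // classified operands of one Prog
		for _, e := range optab {
			if xcmp[have[0]][e.want[0]] && xcmp[have[1]][e.want[1]] {
				fmt.Println("matched", e.name) // first match wins
				return
			}
		}
		fmt.Println("illegal combination")
	}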
o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */ @@ -2676,34 +2551,35 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { } o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking - case 12: /* movb r,r (extsb); movw r,r (extsw) */ - if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST { - v := c.regoff(&p.From) - if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 { - c.ctxt.Diag("literal operation on R0\n%v", p) - } - - o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v)) + case 13: /* mov[bhwd]{z,} r,r */ + // This needs to handle "MOV* $0, Rx". This shows up because $0 also + // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON + // TODO: fix the above behavior and cleanup this exception. + if p.From.Type == obj.TYPE_CONST { + o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0) break } - - if p.As == AMOVW { - o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0) - } else { - o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0) + if p.To.Type == obj.TYPE_CONST { + c.ctxt.Diag("cannot move into constant 0\n%v", p) } - case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */ - if p.As == AMOVBZ { + switch p.As { + case AMOVB: + o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0) + case AMOVBZ: o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31) - } else if p.As == AMOVH { + case AMOVH: o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0) - } else if p.As == AMOVHZ { + case AMOVHZ: o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31) - } else if p.As == AMOVWZ { + case AMOVW: + o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0) + case AMOVWZ: o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */ - } else { - c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p) + case AMOVD: + o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg)) + default: + c.ctxt.Diag("internal: bad register move/truncation\n%v", p) } case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */ @@ -2883,13 +2759,8 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 19: /* mov $lcon,r ==> cau+or */ d := c.vregoff(&p.From) - - if p.From.Sym == nil { - o1 = loadu32(int(p.To.Reg), d) - o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d))) - } else { - o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI) - } + o1 = loadu32(int(p.To.Reg), d) + o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d))) case 20: /* add $ucon,,r | addis $addcon,r,r */ v := c.regoff(&p.From) @@ -3007,16 +2878,21 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { } case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */ - if p.To.Reg == REGTMP { - c.ctxt.Diag("can't synthesize large constant\n%v", p) - } - v := c.regoff(&p.From) + v := c.vregoff(&p.From) r := int(p.From.Reg) - if r == 0 { - r = int(o.param) + + switch p.From.Name { + case obj.NAME_EXTERN, obj.NAME_STATIC: + // Load a 32 bit constant, or relocation depending on if a symbol is attached + o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true) + default: + if r == 0 { + r = c.getimpliedreg(&p.From, p) + } + // Add a 32 bit offset to a register. 
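Case 13's MOVBZ and MOVHZ arms above rely on rlwinm's mb..me mask selection (POWER bit numbering, where bit 0 is the most significant); the RLWMI cases added further below reuse the same M-form fields. A sketch of the mask a given mb..me pair selects; maskFromMBME here is illustrative, not a toolchain helper:

	package main

	import "fmt"

	// Mask selected by mb..me with bit 0 = MSB (POWER bit order).
	func maskFromMBME(mb, me uint) uint32 {
		var m uint32
		for i := mb; i <= me; i++ {
			m |= 1 << (31 - i)
		}
		return m
	}

	func main() {
		// MOVBZ above emits rlwinm rx,ry,0,24,31: keep the low byte.
		fmt.Printf("mb=24 me=31: %#08x\n", maskFromMBME(24, 31)) // 0x000000ff
		// MOVHZ emits rlwinm rx,ry,0,16,31: keep the low halfword.
		fmt.Printf("mb=16 me=31: %#08x\n", maskFromMBME(16, 31)) // 0x0000ffff
	}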
+ o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v)))) + o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) } - o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) - o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v)) case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */ v := c.regoff(p.GetFrom3()) @@ -3157,7 +3033,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { r := int(p.To.Reg) if r == 0 { - r = int(o.param) + r = c.getimpliedreg(&p.To, p) } // Offsets in DS form stores must be a multiple of 4 inst := c.opstore(p.As) @@ -3167,25 +3043,17 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v)) - case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */ + case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */ v := c.regoff(&p.From) r := int(p.From.Reg) if r == 0 { - r = int(o.param) + r = c.getimpliedreg(&p.From, p) } - o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) - o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v)) + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v))) + o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) - case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */ - v := c.regoff(&p.From) - - r := int(p.From.Reg) - if r == 0 { - r = int(o.param) - } - o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) - o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v)) + // Sign extend MOVB if needed o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) case 40: /* word */ @@ -3305,17 +3173,6 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 53: /* mffsX ,fr1 */ o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0) - case 54: /* mov msr,r1; mov r1, msr*/ - if oclass(&p.From) == C_REG { - if p.As == AMOVD { - o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0) - } else { - o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0) - } - } else { - o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0) - } - case 55: /* op Rb, Rd */ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg)) @@ -3554,41 +3411,51 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { if c.opform(inst) == DS_FORM && v&0x3 != 0 { log.Fatalf("invalid offset for DS form load/store %v", p) } - o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst) + // Can't reuse base for store instructions. 
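The addis/addi and addis/load pairs in cases 26, 35 and 36 work because addis adds imm<<16 while the second instruction sign-extends its 16-bit immediate; whenever bit 15 of the offset is set, high16adjusted bumps the high half by one to cancel that sign extension. Re-deriving the arithmetic standalone (the helper below mirrors, but is not, the toolchain's):

	package main

	import "fmt"

	// If the low 16 bits will sign-extend as negative, pre-increment
	// the high 16 bits to compensate.
	func high16adjusted(d int32) int16 {
		if d&0x8000 != 0 {
			return int16((d >> 16) + 1)
		}
		return int16(d >> 16)
	}

	func main() {
		for _, v := range []int32{0x12348765, 0x12340765, -0x1234, 32767, -32768} {
			hi := int32(high16adjusted(v)) << 16 // the addis contribution
			lo := int32(int16(v))                // what addi/ld sign-extends
			fmt.Printf("%#x = %#x %+d  ok=%v\n", v, hi, lo, hi+lo == v)
		}
	}

Every line prints ok=true, which is exactly the invariant the two-instruction sequences depend on.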
+ o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false) - //if(dlm) reloc(&p->to, p->pc, 1); + case 75: // 32 bit offset symbol loads (got/toc/addr) + v := p.From.Offset - case 75: - v := c.vregoff(&p.From) // Offsets in DS form loads must be a multiple of 4 inst := c.opload(p.As) if c.opform(inst) == DS_FORM && v&0x3 != 0 { log.Fatalf("invalid offset for DS form load/store %v", p) } - o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst) - - //if(dlm) reloc(&p->from, p->pc, 1); - - case 76: - v := c.vregoff(&p.From) - // Offsets in DS form loads must be a multiple of 4 - inst := c.opload(p.As) - if c.opform(inst) == DS_FORM && v&0x3 != 0 { - log.Fatalf("invalid offset for DS form load/store %v", p) + switch p.From.Name { + case obj.NAME_GOTREF, obj.NAME_TOCREF: + if v != 0 { + c.ctxt.Diag("invalid offset for GOT/TOC access %v", p) + } + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) + o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + switch p.From.Name { + case obj.NAME_GOTREF: + rel.Type = objabi.R_ADDRPOWER_GOT + case obj.NAME_TOCREF: + rel.Type = objabi.R_ADDRPOWER_TOCREL_DS + } + default: + reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS + // Reuse To.Reg as base register if not FP move. + o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg) } - o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst) + o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) - //if(dlm) reloc(&p->from, p->pc, 1); - case 79: if p.From.Offset != 0 { c.ctxt.Diag("invalid offset against tls var %v", p) } - o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0) + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0) + o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) - rel.Siz = 4 + rel.Siz = 8 rel.Sym = p.From.Sym rel.Type = objabi.R_POWER_TLS_LE @@ -3598,25 +3465,18 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { } o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) + o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 8 rel.Sym = p.From.Sym rel.Type = objabi.R_POWER_TLS_IE - - case 81: - v := c.vregoff(&p.To) - if v != 0 { - c.ctxt.Diag("invalid offset against GOT slot %v", p) - } - - o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) - o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) - rel := obj.Addrel(c.cursym) - rel.Off = int32(c.pc) - rel.Siz = 8 + rel = obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + 8 + rel.Siz = 4 rel.Sym = p.From.Sym - rel.Type = objabi.R_ADDRPOWER_GOT + rel.Type = objabi.R_POWER_TLS + case 82: /* vector instructions, VX-form and VC-form */ if p.From.Type == obj.TYPE_REG { /* reg reg none OR reg reg reg */ @@ -3785,26 +3645,6 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { cy := int(c.regoff(p.GetFrom3())) o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy)) - case 95: /* Retrieve TOC relative symbol */ - /* This code is for AIX only */ - v := c.vregoff(&p.From) - if v != 0 { - c.ctxt.Diag("invalid offset against TOC slot %v", p) - } - - inst := c.opload(p.As) - if c.opform(inst) != DS_FORM { - c.ctxt.Diag("invalid form for a TOC access in %v", p) - } - - o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) - o2 = AOP_IRR(inst, uint32(p.To.Reg), 
uint32(p.To.Reg), 0) - rel := obj.Addrel(c.cursym) - rel.Off = int32(c.pc) - rel.Siz = 8 - rel.Sym = p.From.Sym - rel.Type = objabi.R_ADDRPOWER_TOCREL_DS - case 96: /* VSX load, DQ-form */ /* reg imm reg */ /* operand order: (RA)(DQ), XT */ @@ -3840,6 +3680,17 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { } case 101: o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) + + case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/ + mb := uint32(c.regoff(&p.RestArgs[0].Addr)) + me := uint32(c.regoff(&p.RestArgs[1].Addr)) + sh := uint32(c.regoff(&p.From)) + o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me) + + case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/ + mb := uint32(c.regoff(&p.RestArgs[0].Addr)) + me := uint32(c.regoff(&p.RestArgs[1].Addr)) + o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me) } out[0] = o1 @@ -4336,14 +4187,14 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { case ARLDICLCC: return OPVCC(30, 0, 0, 1) case ARLDICR: - return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr + return OPMD(30, 1, 0) // rldicr case ARLDICRCC: - return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr. + return OPMD(30, 1, 1) // rldicr. case ARLDIC: - return OPVCC(30, 0, 0, 0) | 4<<1 // rldic + return OPMD(30, 2, 0) // rldic case ARLDICCC: - return OPVCC(30, 0, 0, 1) | 4<<1 // rldic. + return OPMD(30, 2, 1) // rldic. case ASYSCALL: return OPVCC(17, 1, 0, 0) @@ -5001,30 +4852,30 @@ func (c *ctxt9) opirr(a obj.As) uint32 { case ARLWMICC: return OPVCC(20, 0, 0, 1) case ARLDMI: - return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */ + return OPMD(30, 3, 0) /* rldimi */ case ARLDMICC: - return OPVCC(30, 0, 0, 1) | 3<<2 + return OPMD(30, 3, 1) /* rldimi. */ case ARLDIMI: - return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */ + return OPMD(30, 3, 0) /* rldimi */ case ARLDIMICC: - return OPVCC(30, 0, 0, 1) | 3<<2 + return OPMD(30, 3, 1) /* rldimi. */ case ARLWNM: return OPVCC(21, 0, 0, 0) /* rlwinm */ case ARLWNMCC: return OPVCC(21, 0, 0, 1) case ARLDCL: - return OPVCC(30, 0, 0, 0) /* rldicl */ + return OPMD(30, 0, 0) /* rldicl */ case ARLDCLCC: - return OPVCC(30, 0, 0, 1) + return OPMD(30, 0, 1) /* rldicl. */ case ARLDCR: - return OPVCC(30, 1, 0, 0) /* rldicr */ + return OPMD(30, 1, 0) /* rldicr */ case ARLDCRCC: - return OPVCC(30, 1, 0, 1) + return OPMD(30, 1, 1) /* rldicr. */ case ARLDC: - return OPVCC(30, 0, 0, 0) | 2<<2 + return OPMD(30, 2, 0) /* rldic */ case ARLDCCC: - return OPVCC(30, 0, 0, 1) | 2<<2 + return OPMD(30, 2, 1) /* rldic. */ case ASRAW: return OPVCC(31, 824, 0, 0) diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go index a77be29cf09..c2722b0afb0 100644 --- a/src/cmd/internal/obj/ppc64/obj9.go +++ b/src/cmd/internal/obj/ppc64/obj9.go @@ -1081,80 +1081,65 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.From.Reg = REG_R3 p.To.Type = obj.TYPE_REG p.To.Reg = REGSP - } else if framesize <= objabi.StackBig { - // large stack: SP-framesize < stackguard-StackSmall - // ADD $-(framesize-StackSmall), SP, R4 - // CMP stackguard, R4 - p = obj.Appendp(p, c.newprog) - - p.As = AADD - p.From.Type = obj.TYPE_CONST - p.From.Offset = -(int64(framesize) - objabi.StackSmall) - p.Reg = REGSP - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R4 - - p = obj.Appendp(p, c.newprog) - p.As = ACMPU - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R3 - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R4 } else { - // Such a large stack we need to protect against wraparound. 
- // If SP is close to zero: - // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) - // The +StackGuard on both sides is required to keep the left side positive: - // SP is allowed to be slightly below stackguard. See stack.h. - // - // Preemption sets stackguard to StackPreempt, a very large value. - // That breaks the math above, so we have to check for that explicitly. - // // stackguard is R3 - // CMP R3, $StackPreempt - // BEQ label-of-call-to-morestack - // ADD $StackGuard, SP, R4 - // SUB R3, R4 - // MOVD $(framesize+(StackGuard-StackSmall)), R31 - // CMPU R31, R4 + // large stack: SP-framesize < stackguard-StackSmall + offset := int64(framesize) - objabi.StackSmall + if framesize > objabi.StackBig { + // Such a large stack we need to protect against underflow. + // The runtime guarantees SP > objabi.StackBig, but + // framesize is large enough that SP-framesize may + // underflow, causing a direct comparison with the + // stack guard to incorrectly succeed. We explicitly + // guard against underflow. + // + // CMPU SP, $(framesize-StackSmall) + // BLT label-of-call-to-morestack + if offset <= 0xffff { + p = obj.Appendp(p, c.newprog) + p.As = ACMPU + p.From.Type = obj.TYPE_REG + p.From.Reg = REGSP + p.To.Type = obj.TYPE_CONST + p.To.Offset = offset + } else { + // Constant is too big for CMPU. + p = obj.Appendp(p, c.newprog) + p.As = AMOVD + p.From.Type = obj.TYPE_CONST + p.From.Offset = offset + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R4 + + p = obj.Appendp(p, c.newprog) + p.As = ACMPU + p.From.Type = obj.TYPE_REG + p.From.Reg = REGSP + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R4 + } + + p = obj.Appendp(p, c.newprog) + q = p + p.As = ABLT + p.To.Type = obj.TYPE_BRANCH + } + + // Check against the stack guard. We've ensured this won't underflow. + // ADD $-(framesize-StackSmall), SP, R4 + // CMPU stackguard, R4 p = obj.Appendp(p, c.newprog) - p.As = ACMP - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R3 - p.To.Type = obj.TYPE_CONST - p.To.Offset = objabi.StackPreempt - - p = obj.Appendp(p, c.newprog) - q = p - p.As = ABEQ - p.To.Type = obj.TYPE_BRANCH - - p = obj.Appendp(p, c.newprog) p.As = AADD p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(objabi.StackGuard) + p.From.Offset = -offset p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 - p = obj.Appendp(p, c.newprog) - p.As = ASUB - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R3 - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R4 - - p = obj.Appendp(p, c.newprog) - p.As = AMOVD - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall - p.To.Type = obj.TYPE_REG - p.To.Reg = REGTMP - p = obj.Appendp(p, c.newprog) p.As = ACMPU p.From.Type = obj.TYPE_REG - p.From.Reg = REGTMP + p.From.Reg = REG_R3 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 } diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index d104f1cfa51..a305edab4b3 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -151,6 +151,15 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { case ASBREAK: // SBREAK is the old name for EBREAK. p.As = AEBREAK + + case AMOV: + // Put >32-bit constants in memory and load them. 
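The test that comment describes is whether the constant survives truncation to int32: anything that does can be materialized directly, and anything that doesn't is spilled to a data symbol and loaded. The predicate in isolation:

	package main

	import "fmt"

	// fitsIn32 reports whether c survives a round trip through int32,
	// i.e. whether it can be built in registers without a memory load.
	func fitsIn32(c int64) bool {
		return int64(int32(c)) == c
	}

	func main() {
		fmt.Println(fitsIn32(0x7fffffff))  // true: largest positive 32-bit value
		fmt.Println(fitsIn32(-0x80000000)) // true: sign-extends cleanly
		fmt.Println(fitsIn32(0x80000000))  // false: goes to memory
		fmt.Println(fitsIn32(1 << 40))     // false
	}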
+ if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset { + p.From.Type = obj.TYPE_MEM + p.From.Sym = ctxt.Int64Sym(p.From.Offset) + p.From.Name = obj.NAME_EXTERN + p.From.Offset = 0 + } } } @@ -302,7 +311,10 @@ func rewriteMOV(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog) { // LUI top20bits(c), R // ADD bottom12bits(c), R, R if p.As != AMOV { - ctxt.Diag("unsupported constant load at %v", p) + ctxt.Diag("%v: unsupported constant load", p) + } + if p.To.Type != obj.TYPE_REG { + ctxt.Diag("%v: constant load must target register", p) } off := p.From.Offset to := p.To @@ -972,8 +984,9 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA var to_done, to_more *obj.Prog if framesize <= objabi.StackSmall { - // small stack: SP < stackguard - // BLTU SP, stackguard, done + // small stack + // // if SP > stackguard { goto done } + // BLTU stackguard, SP, done p = obj.Appendp(p, newprog) p.As = ABLTU p.From.Type = obj.TYPE_REG @@ -981,80 +994,48 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA p.Reg = REG_SP p.To.Type = obj.TYPE_BRANCH to_done = p - } else if framesize <= objabi.StackBig { - // large stack: SP-framesize < stackguard-StackSmall - // ADD $-(framesize-StackSmall), SP, X11 - // BLTU X11, stackguard, done - p = obj.Appendp(p, newprog) - // TODO(sorear): logic inconsistent with comment, but both match all non-x86 arches - p.As = AADDI - p.From.Type = obj.TYPE_CONST - p.From.Offset = -(int64(framesize) - objabi.StackSmall) - p.Reg = REG_SP - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_X11 - - p = obj.Appendp(p, newprog) - p.As = ABLTU - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_X10 - p.Reg = REG_X11 - p.To.Type = obj.TYPE_BRANCH - to_done = p } else { - // Such a large stack we need to protect against wraparound. - // If SP is close to zero: - // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) - // The +StackGuard on both sides is required to keep the left side positive: - // SP is allowed to be slightly below stackguard. See stack.h. - // - // Preemption sets stackguard to StackPreempt, a very large value. - // That breaks the math above, so we have to check for that explicitly. - // // stackguard is X10 - // MOV $StackPreempt, X11 - // BEQ X10, X11, more - // ADD $StackGuard, SP, X11 - // SUB X10, X11 - // MOV $(framesize+(StackGuard-StackSmall)), X10 - // BGTU X11, X10, done - p = obj.Appendp(p, newprog) - p.As = AMOV - p.From.Type = obj.TYPE_CONST - p.From.Offset = objabi.StackPreempt - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_X11 + // large stack: SP-framesize < stackguard-StackSmall + offset := int64(framesize) - objabi.StackSmall + if framesize > objabi.StackBig { + // Such a large stack we need to protect against underflow. + // The runtime guarantees SP > objabi.StackBig, but + // framesize is large enough that SP-framesize may + // underflow, causing a direct comparison with the + // stack guard to incorrectly succeed. We explicitly + // guard against underflow. 
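This underflow rationale appears three times in the commit: ppc64 above, riscv here, and s390x below. In each, the old StackPreempt special case disappears because the guard now protects the subtraction itself. Expressed as plain Go, with placeholder constants standing in for objabi.StackSmall and objabi.StackBig, the rewritten check is:

	package main

	import "fmt"

	const (
		stackSmall = 128  // placeholder for objabi.StackSmall
		stackBig   = 4096 // placeholder for objabi.StackBig
	)

	// needMoreStack models the rewritten prologue: true means call morestack.
	func needMoreStack(sp, stackguard, framesize uint64) bool {
		if framesize <= stackSmall {
			return sp < stackguard // small-stack fast path
		}
		offset := framesize - stackSmall
		if framesize > stackBig && sp < offset {
			// SP-offset would wrap below zero, making the unsigned
			// comparison below succeed spuriously: go to morestack.
			return true
		}
		return sp-offset < stackguard // ADD $-offset, SP; compare with guard
	}

	func main() {
		fmt.Println(needMoreStack(1<<20, 8<<10, 64<<10)) // false: plenty of room
		fmt.Println(needMoreStack(5<<10, 8<<10, 8<<10))  // true: SP-offset underflows
	}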
+			//
+			// MOV	$(framesize-StackSmall), X11
+			// BLTU	SP, X11, label-of-call-to-morestack
-		p = obj.Appendp(p, newprog)
-		to_more = p
-		p.As = ABEQ
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_X10
-		p.Reg = REG_X11
-		p.To.Type = obj.TYPE_BRANCH
+			p = obj.Appendp(p, newprog)
+			p.As = AMOV
+			p.From.Type = obj.TYPE_CONST
+			p.From.Offset = offset
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REG_X11
+			p = obj.Appendp(p, newprog)
+			p.As = ABLTU
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = REG_SP
+			p.Reg = REG_X11
+			p.To.Type = obj.TYPE_BRANCH
+			to_more = p
+		}
+
+		// Check against the stack guard. We've ensured this won't underflow.
+		//	ADD	$-(framesize-StackSmall), SP, X11
+		//	// if X11 > stackguard { goto done }
+		//	BLTU	stackguard, X11, done
 		p = obj.Appendp(p, newprog)
 		p.As = AADDI
 		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(objabi.StackGuard)
+		p.From.Offset = -offset
 		p.Reg = REG_SP
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = REG_X11
-		p = obj.Appendp(p, newprog)
-		p.As = ASUB
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_X10
-		p.Reg = REG_X11
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_X11
-
-		p = obj.Appendp(p, newprog)
-		p.As = AMOV
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_X10
-
 		p = obj.Appendp(p, newprog)
 		p.As = ABLTU
 		p.From.Type = obj.TYPE_REG
diff --git a/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go b/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go
index 279aeb2c32b..de412c64a78 100644
--- a/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go
+++ b/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.go
@@ -25,84 +25,73 @@ func testBLTU(a, b int64) (r bool)
 func testBLTZ(a int64) (r bool)
 func testBNEZ(a int64) (r bool)
 
+func testGoBGE(a, b int64) bool { return a >= b }
+func testGoBGEU(a, b int64) bool { return uint64(a) >= uint64(b) }
+func testGoBGT(a, b int64) bool { return a > b }
+func testGoBGTU(a, b int64) bool { return uint64(a) > uint64(b) }
+func testGoBLE(a, b int64) bool { return a <= b }
+func testGoBLEU(a, b int64) bool { return uint64(a) <= uint64(b) }
+func testGoBLT(a, b int64) bool { return a < b }
+func testGoBLTU(a, b int64) bool { return uint64(a) < uint64(b) }
+
 func TestBranchCondition(t *testing.T) {
 	tests := []struct {
 		ins  string
 		a    int64
 		b    int64
 		fn   func(a, b int64) bool
+		goFn func(a, b int64) bool
 		want bool
 	}{
-		{"BGE", 0, 1, testBGE, false},
-		{"BGE", 0, 0, testBGE, true},
-		{"BGE", 0, -1, testBGE, true},
-		{"BGE", -1, 0, testBGE, false},
-		{"BGE", 1, 0, testBGE, true},
-		{"BGEU", 0, 1, testBGEU, false},
-		{"BGEU", 0, 0, testBGEU, true},
-		{"BGEU", 0, -1, testBGEU, false},
-		{"BGEU", -1, 0, testBGEU, true},
-		{"BGEU", 1, 0, testBGEU, true},
-		{"BGT", 0, 1, testBGT, false},
-		{"BGT", 0, 0, testBGT, false},
-		{"BGT", 0, -1, testBGT, true},
-		{"BGT", -1, 0, testBGT, false},
-		{"BGT", 1, 0, testBGT, true},
-		{"BGTU", 0, 1, testBGTU, false},
-		{"BGTU", 0, 0, testBGTU, false},
-		{"BGTU", 0, -1, testBGTU, false},
-		{"BGTU", -1, 0, testBGTU, true},
-		{"BGTU", 1, 0, testBGTU, true},
-		{"BLE", 0, 1, testBLE, true},
-		{"BLE", 0, 0, testBLE, true},
-		{"BLE", 0, -1, testBLE, false},
-		{"BLE", -1, 0, testBLE, true},
-		{"BLE", 1, 0, testBLE, false},
-		{"BLEU", 0, 1, testBLEU, true},
-		{"BLEU", 0, 0, testBLEU, true},
-		{"BLEU", 0, -1, testBLEU, true},
-		{"BLEU", -1, 0, testBLEU, false},
-		{"BLEU", 1, 0, testBLEU, false},
-		{"BLT", 0, 1, testBLT, true},
-		{"BLT", 0, 0, testBLT, false},
-		{"BLT", 0, -1, testBLT,
false}, - {"BLT", -1, 0, testBLT, true}, - {"BLT", 1, 0, testBLT, false}, - {"BLTU", 0, 1, testBLTU, true}, - {"BLTU", 0, 0, testBLTU, false}, - {"BLTU", 0, -1, testBLTU, true}, - {"BLTU", -1, 0, testBLTU, false}, - {"BLTU", 1, 0, testBLTU, false}, + {"BGE", 0, 1, testBGE, testGoBGE, false}, + {"BGE", 0, 0, testBGE, testGoBGE, true}, + {"BGE", 0, -1, testBGE, testGoBGE, true}, + {"BGE", -1, 0, testBGE, testGoBGE, false}, + {"BGE", 1, 0, testBGE, testGoBGE, true}, + {"BGEU", 0, 1, testBGEU, testGoBGEU, false}, + {"BGEU", 0, 0, testBGEU, testGoBGEU, true}, + {"BGEU", 0, -1, testBGEU, testGoBGEU, false}, + {"BGEU", -1, 0, testBGEU, testGoBGEU, true}, + {"BGEU", 1, 0, testBGEU, testGoBGEU, true}, + {"BGT", 0, 1, testBGT, testGoBGT, false}, + {"BGT", 0, 0, testBGT, testGoBGT, false}, + {"BGT", 0, -1, testBGT, testGoBGT, true}, + {"BGT", -1, 0, testBGT, testGoBGT, false}, + {"BGT", 1, 0, testBGT, testGoBGT, true}, + {"BGTU", 0, 1, testBGTU, testGoBGTU, false}, + {"BGTU", 0, 0, testBGTU, testGoBGTU, false}, + {"BGTU", 0, -1, testBGTU, testGoBGTU, false}, + {"BGTU", -1, 0, testBGTU, testGoBGTU, true}, + {"BGTU", 1, 0, testBGTU, testGoBGTU, true}, + {"BLE", 0, 1, testBLE, testGoBLE, true}, + {"BLE", 0, 0, testBLE, testGoBLE, true}, + {"BLE", 0, -1, testBLE, testGoBLE, false}, + {"BLE", -1, 0, testBLE, testGoBLE, true}, + {"BLE", 1, 0, testBLE, testGoBLE, false}, + {"BLEU", 0, 1, testBLEU, testGoBLEU, true}, + {"BLEU", 0, 0, testBLEU, testGoBLEU, true}, + {"BLEU", 0, -1, testBLEU, testGoBLEU, true}, + {"BLEU", -1, 0, testBLEU, testGoBLEU, false}, + {"BLEU", 1, 0, testBLEU, testGoBLEU, false}, + {"BLT", 0, 1, testBLT, testGoBLT, true}, + {"BLT", 0, 0, testBLT, testGoBLT, false}, + {"BLT", 0, -1, testBLT, testGoBLT, false}, + {"BLT", -1, 0, testBLT, testGoBLT, true}, + {"BLT", 1, 0, testBLT, testGoBLT, false}, + {"BLTU", 0, 1, testBLTU, testGoBLTU, true}, + {"BLTU", 0, 0, testBLTU, testGoBLTU, false}, + {"BLTU", 0, -1, testBLTU, testGoBLTU, true}, + {"BLTU", -1, 0, testBLTU, testGoBLTU, false}, + {"BLTU", 1, 0, testBLTU, testGoBLTU, false}, } for _, test := range tests { t.Run(test.ins, func(t *testing.T) { - var fn func(a, b int64) bool - switch test.ins { - case "BGE": - fn = func(a, b int64) bool { return a >= b } - case "BGEU": - fn = func(a, b int64) bool { return uint64(a) >= uint64(b) } - case "BGT": - fn = func(a, b int64) bool { return a > b } - case "BGTU": - fn = func(a, b int64) bool { return uint64(a) > uint64(b) } - case "BLE": - fn = func(a, b int64) bool { return a <= b } - case "BLEU": - fn = func(a, b int64) bool { return uint64(a) <= uint64(b) } - case "BLT": - fn = func(a, b int64) bool { return a < b } - case "BLTU": - fn = func(a, b int64) bool { return uint64(a) < uint64(b) } - default: - t.Fatalf("Unknown instruction %q", test.ins) - } - if got := fn(test.a, test.b); got != test.want { - t.Errorf("Go %v %v, %v = %v, want %v", test.ins, test.a, test.b, got, test.want) - } if got := test.fn(test.a, test.b); got != test.want { t.Errorf("Assembly %v %v, %v = %v, want %v", test.ins, test.a, test.b, got, test.want) } + if got := test.goFn(test.a, test.b); got != test.want { + t.Errorf("Go %v %v, %v = %v, want %v", test.ins, test.a, test.b, got, test.want) + } }) } } diff --git a/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s b/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s index 8dd6f563af6..cce296feb5c 100644 --- a/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s +++ b/src/cmd/internal/obj/riscv/testdata/testbranch/branch_test.s @@ -7,7 +7,7 
@@ #include "textflag.h" // func testBEQZ(a int64) (r bool) -TEXT ·testBEQZ(SB),NOSPLIT,$0-0 +TEXT ·testBEQZ(SB),NOSPLIT,$0-9 MOV a+0(FP), X5 MOV $1, X6 BEQZ X5, b @@ -17,7 +17,7 @@ b: RET // func testBGE(a, b int64) (r bool) -TEXT ·testBGE(SB),NOSPLIT,$0-0 +TEXT ·testBGE(SB),NOSPLIT,$0-17 MOV a+0(FP), X5 MOV b+8(FP), X6 MOV $1, X7 @@ -28,7 +28,7 @@ b: RET // func testBGEU(a, b int64) (r bool) -TEXT ·testBGEU(SB),NOSPLIT,$0-0 +TEXT ·testBGEU(SB),NOSPLIT,$0-17 MOV a+0(FP), X5 MOV b+8(FP), X6 MOV $1, X7 @@ -39,7 +39,7 @@ b: RET // func testBGEZ(a int64) (r bool) -TEXT ·testBGEZ(SB),NOSPLIT,$0-0 +TEXT ·testBGEZ(SB),NOSPLIT,$0-9 MOV a+0(FP), X5 MOV $1, X6 BGEZ X5, b @@ -49,7 +49,7 @@ b: RET // func testBGT(a, b int64) (r bool) -TEXT ·testBGT(SB),NOSPLIT,$0-0 +TEXT ·testBGT(SB),NOSPLIT,$0-17 MOV a+0(FP), X5 MOV b+8(FP), X6 MOV $1, X7 @@ -60,7 +60,7 @@ b: RET // func testBGTU(a, b int64) (r bool) -TEXT ·testBGTU(SB),NOSPLIT,$0-0 +TEXT ·testBGTU(SB),NOSPLIT,$0-17 MOV a+0(FP), X5 MOV b+8(FP), X6 MOV $1, X7 @@ -71,7 +71,7 @@ b: RET // func testBGTZ(a int64) (r bool) -TEXT ·testBGTZ(SB),NOSPLIT,$0-0 +TEXT ·testBGTZ(SB),NOSPLIT,$0-9 MOV a+0(FP), X5 MOV $1, X6 BGTZ X5, b @@ -81,7 +81,7 @@ b: RET // func testBLE(a, b int64) (r bool) -TEXT ·testBLE(SB),NOSPLIT,$0-0 +TEXT ·testBLE(SB),NOSPLIT,$0-17 MOV a+0(FP), X5 MOV b+8(FP), X6 MOV $1, X7 @@ -92,7 +92,7 @@ b: RET // func testBLEU(a, b int64) (r bool) -TEXT ·testBLEU(SB),NOSPLIT,$0-0 +TEXT ·testBLEU(SB),NOSPLIT,$0-17 MOV a+0(FP), X5 MOV b+8(FP), X6 MOV $1, X7 @@ -103,7 +103,7 @@ b: RET // func testBLEZ(a int64) (r bool) -TEXT ·testBLEZ(SB),NOSPLIT,$0-0 +TEXT ·testBLEZ(SB),NOSPLIT,$0-9 MOV a+0(FP), X5 MOV $1, X6 BLEZ X5, b @@ -113,7 +113,7 @@ b: RET // func testBLT(a, b int64) (r bool) -TEXT ·testBLT(SB),NOSPLIT,$0-0 +TEXT ·testBLT(SB),NOSPLIT,$0-17 MOV a+0(FP), X5 MOV b+8(FP), X6 MOV $1, X7 @@ -124,7 +124,7 @@ b: RET // func testBLTU(a, b int64) (r bool) -TEXT ·testBLTU(SB),NOSPLIT,$0-0 +TEXT ·testBLTU(SB),NOSPLIT,$0-17 MOV a+0(FP), X5 MOV b+8(FP), X6 MOV $1, X7 @@ -135,7 +135,7 @@ b: RET // func testBLTZ(a int64) (r bool) -TEXT ·testBLTZ(SB),NOSPLIT,$0-0 +TEXT ·testBLTZ(SB),NOSPLIT,$0-9 MOV a+0(FP), X5 MOV $1, X6 BLTZ X5, b @@ -145,7 +145,7 @@ b: RET // func testBNEZ(a int64) (r bool) -TEXT ·testBNEZ(SB),NOSPLIT,$0-0 +TEXT ·testBNEZ(SB),NOSPLIT,$0-9 MOV a+0(FP), X5 MOV $1, X6 BNEZ X5, b diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go index a02c4fc17f4..201163b0159 100644 --- a/src/cmd/internal/obj/s390x/objz.go +++ b/src/cmd/internal/obj/s390x/objz.go @@ -568,7 +568,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (*obj.Prog, *obj.Prog) { - var q *obj.Prog // MOVD g_stackguard(g), R3 p = obj.Appendp(p, c.newprog) @@ -589,98 +588,69 @@ func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (*obj.Prog, *obj.Pro // unnecessarily. See issue #35470. 
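One more note on the branch_test.s hunks above: the TEXT directives previously claimed $0-0 (no frame, no arguments), understating the real argument area; the digit after the minus is the byte size of the function's arguments plus results in the caller's frame, which vet's asmdecl check compares against the Go declarations. The corrected sizes fall out of the usual FP layout:

	package main

	import "fmt"

	func main() {
		// func testBGE(a, b int64) (r bool) -> TEXT ·testBGE(SB),NOSPLIT,$0-17
		const (
			aOff = 0  // a+0(FP), 8 bytes
			bOff = 8  // b+8(FP), 8 bytes
			rOff = 16 // r+16(FP), 1 byte
		)
		fmt.Printf("$0-%d\n", rOff+1) // $0-17
		// Single-operand helpers: a at 0(FP), r at 8(FP), hence $0-9.
	}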
p = c.ctxt.StartUnsafePoint(p, c.newprog) - q = nil if framesize <= objabi.StackSmall { // small stack: SP < stackguard // CMPUBGE stackguard, SP, label-of-call-to-morestack p = obj.Appendp(p, c.newprog) - //q1 = p p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.Reg = REGSP p.As = ACMPUBGE p.To.Type = obj.TYPE_BRANCH - } else if framesize <= objabi.StackBig { - // large stack: SP-framesize < stackguard-StackSmall - // ADD $-(framesize-StackSmall), SP, R4 - // CMPUBGE stackguard, R4, label-of-call-to-morestack - p = obj.Appendp(p, c.newprog) + return p, nil + } - p.As = AADD - p.From.Type = obj.TYPE_CONST - p.From.Offset = -(int64(framesize) - objabi.StackSmall) - p.Reg = REGSP - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R4 + // large stack: SP-framesize < stackguard-StackSmall - p = obj.Appendp(p, c.newprog) - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R3 - p.Reg = REG_R4 - p.As = ACMPUBGE - p.To.Type = obj.TYPE_BRANCH - - } else { - // Such a large stack we need to protect against wraparound. - // If SP is close to zero: - // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) - // The +StackGuard on both sides is required to keep the left side positive: - // SP is allowed to be slightly below stackguard. See stack.h. + var q *obj.Prog + offset := int64(framesize) - objabi.StackSmall + if framesize > objabi.StackBig { + // Such a large stack we need to protect against underflow. + // The runtime guarantees SP > objabi.StackBig, but + // framesize is large enough that SP-framesize may + // underflow, causing a direct comparison with the + // stack guard to incorrectly succeed. We explicitly + // guard against underflow. // - // Preemption sets stackguard to StackPreempt, a very large value. - // That breaks the math above, so we have to check for that explicitly. - // // stackguard is R3 - // CMP R3, $StackPreempt - // BEQ label-of-call-to-morestack - // ADD $StackGuard, SP, R4 - // SUB R3, R4 - // MOVD $(framesize+(StackGuard-StackSmall)), TEMP - // CMPUBGE TEMP, R4, label-of-call-to-morestack - p = obj.Appendp(p, c.newprog) - - p.As = ACMP - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R3 - p.To.Type = obj.TYPE_CONST - p.To.Offset = objabi.StackPreempt - - p = obj.Appendp(p, c.newprog) - q = p - p.As = ABEQ - p.To.Type = obj.TYPE_BRANCH - - p = obj.Appendp(p, c.newprog) - p.As = AADD - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(objabi.StackGuard) - p.Reg = REGSP - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R4 - - p = obj.Appendp(p, c.newprog) - p.As = ASUB - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R3 - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R4 + // MOVD $(framesize-StackSmall), R4 + // CMPUBLT SP, R4, label-of-call-to-morestack p = obj.Appendp(p, c.newprog) p.As = AMOVD p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall + p.From.Offset = offset p.To.Type = obj.TYPE_REG - p.To.Reg = REGTMP + p.To.Reg = REG_R4 p = obj.Appendp(p, c.newprog) + q = p + p.As = ACMPUBLT p.From.Type = obj.TYPE_REG - p.From.Reg = REGTMP + p.From.Reg = REGSP p.Reg = REG_R4 - p.As = ACMPUBGE p.To.Type = obj.TYPE_BRANCH } + // Check against the stack guard. We've ensured this won't underflow. 
+ // ADD $-(framesize-StackSmall), SP, R4 + // CMPUBGE stackguard, R4, label-of-call-to-morestack + p = obj.Appendp(p, c.newprog) + p.As = AADD + p.From.Type = obj.TYPE_CONST + p.From.Offset = -offset + p.Reg = REGSP + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R4 + + p = obj.Appendp(p, c.newprog) + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_R3 + p.Reg = REG_R4 + p.As = ACMPUBGE + p.To.Type = obj.TYPE_BRANCH + return p, q } diff --git a/src/cmd/internal/obj/stringer.go b/src/cmd/internal/obj/stringer.go index f67b89091c1..a4d507d49aa 100644 --- a/src/cmd/internal/obj/stringer.go +++ b/src/cmd/internal/obj/stringer.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build ignore // +build ignore // This is a mini version of the stringer tool customized for the Anames table diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go index 4515bdd0d3a..9e8b4dd790f 100644 --- a/src/cmd/internal/obj/sym.go +++ b/src/cmd/internal/obj/sym.go @@ -35,6 +35,7 @@ import ( "cmd/internal/goobj" "cmd/internal/objabi" "fmt" + "internal/buildcfg" "log" "math" "sort" @@ -49,15 +50,15 @@ func Linknew(arch *LinkArch) *Link { ctxt.Arch = arch ctxt.Pathname = objabi.WorkingDir() - if err := ctxt.Headtype.Set(objabi.GOOS); err != nil { - log.Fatalf("unknown goos %s", objabi.GOOS) + if err := ctxt.Headtype.Set(buildcfg.GOOS); err != nil { + log.Fatalf("unknown goos %s", buildcfg.GOOS) } ctxt.Flag_optimize = true return ctxt } -// LookupDerived looks up or creates the symbol with name name derived from symbol s. +// LookupDerived looks up or creates the symbol with name derived from symbol s. // The resulting symbol will be static iff s is. func (ctxt *Link) LookupDerived(s *LSym, name string) *LSym { if s.Static() { diff --git a/src/cmd/internal/obj/textflag.go b/src/cmd/internal/obj/textflag.go index 2f55793285e..881e1922031 100644 --- a/src/cmd/internal/obj/textflag.go +++ b/src/cmd/internal/obj/textflag.go @@ -27,7 +27,8 @@ const ( // This data contains no pointers. NOPTR = 16 - // This is a wrapper function and should not count as disabling 'recover'. + // This is a wrapper function and should not count as + // disabling 'recover' or appear in tracebacks by default. WRAPPER = 32 // This function uses its incoming context register. 
diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go index 1c34b4e833e..e8441a69694 100644 --- a/src/cmd/internal/obj/util.go +++ b/src/cmd/internal/obj/util.go @@ -8,6 +8,7 @@ import ( "bytes" "cmd/internal/objabi" "fmt" + "internal/buildcfg" "io" "strings" ) @@ -83,7 +84,7 @@ func CConv(s uint8) string { } for i := range opSuffixSpace { sset := &opSuffixSpace[i] - if sset.arch == objabi.GOARCH { + if sset.arch == buildcfg.GOARCH { return sset.cconv(s) } } @@ -330,7 +331,7 @@ func writeDconv(w io.Writer, p *Prog, a *Addr, abiDetail bool) { case TYPE_SHIFT: v := int(a.Offset) ops := "<<>>->@>" - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "arm": op := ops[((v>>5)&3)<<1:] if v&(1<<4) != 0 { @@ -346,7 +347,7 @@ func writeDconv(w io.Writer, p *Prog, a *Addr, abiDetail bool) { r := (v >> 16) & 31 fmt.Fprintf(w, "%s%c%c%d", Rconv(r+RBaseARM64), op[0], op[1], (v>>10)&63) default: - panic("TYPE_SHIFT is not supported on " + objabi.GOARCH) + panic("TYPE_SHIFT is not supported on " + buildcfg.GOARCH) } case TYPE_REGREG: diff --git a/src/cmd/internal/obj/wasm/wasmobj.go b/src/cmd/internal/obj/wasm/wasmobj.go index 2e9890d86c0..ceeae7a257c 100644 --- a/src/cmd/internal/obj/wasm/wasmobj.go +++ b/src/cmd/internal/obj/wasm/wasmobj.go @@ -144,11 +144,9 @@ func instinit(ctxt *obj.Link) { gcWriteBarrier = ctxt.LookupABI("runtime.gcWriteBarrier", obj.ABIInternal) sigpanic = ctxt.LookupABI("runtime.sigpanic", obj.ABIInternal) deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal) - // jmpdefer is defined in assembly as ABI0, but what we're - // looking for is the *call* to jmpdefer from the Go function - // deferreturn, so we're looking for the ABIInternal version - // of jmpdefer that's called by Go. - jmpdefer = ctxt.LookupABI(`"".jmpdefer`, obj.ABIInternal) + // jmpdefer is defined in assembly as ABI0. The compiler will + // generate a direct ABI0 call from Go, so look for that. 
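A side note on the writeDconv TYPE_SHIFT case above (now dispatching on buildcfg.GOARCH): for arm, the shift operator is pulled out of the packed operand word two characters at a time from the `ops` string. Isolated, with made-up sample values:

	package main

	import "fmt"

	func main() {
		ops := "<<>>->@>" // LSL, LSR, ASR, ROR: two characters each
		for kind := 0; kind < 4; kind++ {
			v := kind << 5                // bits 5-6 hold the shift kind
			op := ops[((v>>5)&3)<<1:][:2] // same indexing as the arm case above
			fmt.Printf("kind %d -> %s\n", kind, op)
		}
	}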
+ jmpdefer = ctxt.LookupABI(`"".jmpdefer`, obj.ABI0) } func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { diff --git a/src/cmd/internal/obj/x86/a.out.go b/src/cmd/internal/obj/x86/a.out.go index 3be4b59da46..b121f6df7b2 100644 --- a/src/cmd/internal/obj/x86/a.out.go +++ b/src/cmd/internal/obj/x86/a.out.go @@ -258,23 +258,25 @@ const ( REG_DR = REG_DR0 REG_TR = REG_TR0 - REGARG = -1 - REGRET = REG_AX - FREGRET = REG_X0 - REGSP = REG_SP - REGCTXT = REG_DX - REGG = REG_R14 // g register in ABIInternal - REGEXT = REG_R15 // compiler allocates external registers R15 down - FREGMIN = REG_X0 + 5 // first register variable - FREGEXT = REG_X0 + 15 // first external register - T_TYPE = 1 << 0 - T_INDEX = 1 << 1 - T_OFFSET = 1 << 2 - T_FCONST = 1 << 3 - T_SYM = 1 << 4 - T_SCONST = 1 << 5 - T_64 = 1 << 6 - T_GOTYPE = 1 << 7 + REGARG = -1 + REGRET = REG_AX + FREGRET = REG_X0 + REGSP = REG_SP + REGCTXT = REG_DX + REGENTRYTMP0 = REG_R12 // scratch register available at function entry in ABIInternal + REGENTRYTMP1 = REG_R13 // scratch register available at function entry in ABIInternal + REGG = REG_R14 // g register in ABIInternal + REGEXT = REG_R15 // compiler allocates external registers R15 down + FREGMIN = REG_X0 + 5 // first register variable + FREGEXT = REG_X0 + 15 // first external register + T_TYPE = 1 << 0 + T_INDEX = 1 << 1 + T_OFFSET = 1 << 2 + T_FCONST = 1 << 3 + T_SYM = 1 << 4 + T_SCONST = 1 << 5 + T_64 = 1 << 6 + T_GOTYPE = 1 << 7 ) // https://www.uclibc.org/docs/psABI-x86_64.pdf, figure 3.36 diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index a6b85ac4a06..17fa76727e6 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -36,6 +36,7 @@ import ( "cmd/internal/sys" "encoding/binary" "fmt" + "internal/buildcfg" "log" "strings" ) @@ -1887,7 +1888,7 @@ func lookForJCC(p *obj.Prog) *obj.Prog { func fusedJump(p *obj.Prog) (bool, uint8) { var fusedSize uint8 - // The first instruction in a macro fused pair may be preceeded by the LOCK prefix, + // The first instruction in a macro fused pair may be preceded by the LOCK prefix, // or possibly an XACQUIRE/XRELEASE prefix followed by a LOCK prefix. If it is, we // need to be careful to insert any padding before the locks rather than directly after them. @@ -2460,7 +2461,7 @@ func instinit(ctxt *obj.Link) { } } -var isAndroid = objabi.GOOS == "android" +var isAndroid = buildcfg.GOOS == "android" func prefixof(ctxt *obj.Link, a *obj.Addr) int { if a.Reg < REG_CS && a.Index < REG_CS { // fast path @@ -5306,7 +5307,7 @@ bad: } } - ctxt.Diag("invalid instruction: %v", p) + ctxt.Diag("%s: invalid instruction: %v", cursym.Name, p) } // byteswapreg returns a byte-addressable register (AX, BX, CX, DX) diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index bc3a3b4bbe7..e2732d53e30 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -35,8 +35,10 @@ import ( "cmd/internal/objabi" "cmd/internal/src" "cmd/internal/sys" + "internal/buildcfg" "log" "math" + "path" "strings" ) @@ -135,7 +137,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p.To.Index = REG_NONE } } else { - // load_g_cx, below, always inserts the 1-instruction sequence. Rewrite it + // load_g, below, always inserts the 1-instruction sequence. Rewrite it // as the 2-instruction sequence if necessary. 
// MOVQ 0(TLS), BX // becomes @@ -563,6 +565,11 @@ func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { obj.Nopout(p) } +// Prog.mark +const ( + markBit = 1 << 0 // used in errorCheck to avoid duplicate work +) + func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if cursym.Func().Text == nil || cursym.Func().Text.Link == nil { return @@ -639,15 +646,25 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } var regg int16 - if !p.From.Sym.NoSplit() || (p.From.Sym.Wrapper() && !p.From.Sym.ABIWrapper()) { - if ctxt.Arch.Family == sys.AMD64 && objabi.Regabi_enabled != 0 && cursym.ABI() == obj.ABIInternal { + if !p.From.Sym.NoSplit() || p.From.Sym.Wrapper() { + if ctxt.Arch.Family == sys.AMD64 && buildcfg.Experiment.RegabiG && cursym.ABI() == obj.ABIInternal { regg = REGG // use the g register directly in ABIInternal } else { p = obj.Appendp(p, newprog) - p = load_g_cx(ctxt, p, newprog) // load g into CX regg = REG_CX + if ctxt.Arch.Family == sys.AMD64 { + // Using this register means that stacksplit works w/ //go:registerparams even when !buildcfg.Experiment.RegabiG + regg = REGG // == REG_R14 + } + p = load_g(ctxt, p, newprog, regg) // load g into regg } } + var regEntryTmp0, regEntryTmp1 int16 + if ctxt.Arch.Family == sys.AMD64 { + regEntryTmp0, regEntryTmp1 = REGENTRYTMP0, REGENTRYTMP1 + } else { + regEntryTmp0, regEntryTmp1 = REG_BX, REG_DI + } if !cursym.Func().Text.From.Sym.NoSplit() { p = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg), regg) // emit split check @@ -697,22 +714,22 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.To.Reg = REG_BP } - if cursym.Func().Text.From.Sym.Wrapper() && !cursym.Func().Text.From.Sym.ABIWrapper() { + if cursym.Func().Text.From.Sym.Wrapper() { // if g._panic != nil && g._panic.argp == FP { // g._panic.argp = bottom-of-frame // } // - // MOVQ g_panic(g), BX - // TESTQ BX, BX + // MOVQ g_panic(g), regEntryTmp0 + // TESTQ regEntryTmp0, regEntryTmp0 // JNE checkargp // end: // NOP // ... rest of function ... // checkargp: - // LEAQ (autoffset+8)(SP), DI - // CMPQ panic_argp(BX), DI + // LEAQ (autoffset+8)(SP), regEntryTmp1 + // CMPQ panic_argp(regEntryTmp0), regEntryTmp1 // JNE end - // MOVQ SP, panic_argp(BX) + // MOVQ SP, panic_argp(regEntryTmp0) // JMP end // // The NOP is needed to give the jumps somewhere to land. @@ -721,25 +738,25 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // The layout is chosen to help static branch prediction: // Both conditional jumps are unlikely, so they are arranged to be forward jumps. 
- // MOVQ g_panic(CX), BX + // MOVQ g_panic(g), regEntryTmp0 p = obj.Appendp(p, newprog) p.As = AMOVQ p.From.Type = obj.TYPE_MEM p.From.Reg = regg p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // g_panic p.To.Type = obj.TYPE_REG - p.To.Reg = REG_BX + p.To.Reg = regEntryTmp0 if ctxt.Arch.Family == sys.I386 { p.As = AMOVL } - // TESTQ BX, BX + // TESTQ regEntryTmp0, regEntryTmp0 p = obj.Appendp(p, newprog) p.As = ATESTQ p.From.Type = obj.TYPE_REG - p.From.Reg = REG_BX + p.From.Reg = regEntryTmp0 p.To.Type = obj.TYPE_REG - p.To.Reg = REG_BX + p.To.Reg = regEntryTmp0 if ctxt.Arch.Family == sys.I386 { p.As = ATESTL } @@ -759,14 +776,14 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { for last = end; last.Link != nil; last = last.Link { } - // LEAQ (autoffset+8)(SP), DI + // LEAQ (autoffset+8)(SP), regEntryTmp1 p = obj.Appendp(last, newprog) p.As = ALEAQ p.From.Type = obj.TYPE_MEM p.From.Reg = REG_SP p.From.Offset = int64(autoffset) + int64(ctxt.Arch.RegSize) p.To.Type = obj.TYPE_REG - p.To.Reg = REG_DI + p.To.Reg = regEntryTmp1 if ctxt.Arch.Family == sys.I386 { p.As = ALEAL } @@ -774,14 +791,14 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // Set jne branch target. jne.To.SetTarget(p) - // CMPQ panic_argp(BX), DI + // CMPQ panic_argp(regEntryTmp0), regEntryTmp1 p = obj.Appendp(p, newprog) p.As = ACMPQ p.From.Type = obj.TYPE_MEM - p.From.Reg = REG_BX + p.From.Reg = regEntryTmp0 p.From.Offset = 0 // Panic.argp p.To.Type = obj.TYPE_REG - p.To.Reg = REG_DI + p.To.Reg = regEntryTmp1 if ctxt.Arch.Family == sys.I386 { p.As = ACMPL } @@ -792,13 +809,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.To.Type = obj.TYPE_BRANCH p.To.SetTarget(end) - // MOVQ SP, panic_argp(BX) + // MOVQ SP, panic_argp(regEntryTmp0) p = obj.Appendp(p, newprog) p.As = AMOVQ p.From.Type = obj.TYPE_REG p.From.Reg = REG_SP p.To.Type = obj.TYPE_MEM - p.To.Reg = REG_BX + p.To.Reg = regEntryTmp0 p.To.Offset = 0 // Panic.argp if ctxt.Arch.Family == sys.I386 { p.As = AMOVL @@ -896,7 +913,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } if autoffset != deltasp { - ctxt.Diag("unbalanced PUSH/POP") + ctxt.Diag("%s: unbalanced PUSH/POP", cursym) } if autoffset != 0 { @@ -963,7 +980,7 @@ func indir_cx(ctxt *obj.Link, a *obj.Addr) { // Overwriting p is unusual but it lets use this in both the // prologue (caller must call appendp first) and in the epilogue. // Returns last new instruction. -func load_g_cx(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) *obj.Prog { +func load_g(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, rg int16) *obj.Prog { p.As = AMOVQ if ctxt.Arch.PtrSize == 4 { p.As = AMOVL @@ -972,7 +989,7 @@ func load_g_cx(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) *obj.Prog { p.From.Reg = REG_TLS p.From.Offset = 0 p.To.Type = obj.TYPE_REG - p.To.Reg = REG_CX + p.To.Reg = rg next := p.Link progedit(ctxt, p, newprog) @@ -1005,6 +1022,12 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA sub = ASUBL } + tmp := int16(REG_AX) // use AX for 32-bit + if ctxt.Arch.Family == sys.AMD64 { + // Avoid register parameters. 
+ tmp = int16(REGENTRYTMP0) + } + var q1 *obj.Prog if framesize <= objabi.StackSmall { // small stack: SP <= stackguard @@ -1028,8 +1051,8 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA p = ctxt.StartUnsafePoint(p, newprog) } else if framesize <= objabi.StackBig { // large stack: SP-framesize <= stackguard-StackSmall - // LEAQ -xxx(SP), AX - // CMPQ AX, stackguard + // LEAQ -xxx(SP), tmp + // CMPQ tmp, stackguard p = obj.Appendp(p, newprog) p.As = lea @@ -1037,12 +1060,12 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA p.From.Reg = REG_SP p.From.Offset = -(int64(framesize) - objabi.StackSmall) p.To.Type = obj.TYPE_REG - p.To.Reg = REG_AX + p.To.Reg = tmp p = obj.Appendp(p, newprog) p.As = cmp p.From.Type = obj.TYPE_REG - p.From.Reg = REG_AX + p.From.Reg = tmp p.To.Type = obj.TYPE_MEM p.To.Reg = rg p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 @@ -1052,71 +1075,51 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA p = ctxt.StartUnsafePoint(p, newprog) // see the comment above } else { - // Such a large stack we need to protect against wraparound. - // If SP is close to zero: - // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall) - // The +StackGuard on both sides is required to keep the left side positive: - // SP is allowed to be slightly below stackguard. See stack.h. + // Such a large stack we need to protect against underflow. + // The runtime guarantees SP > objabi.StackBig, but + // framesize is large enough that SP-framesize may + // underflow, causing a direct comparison with the + // stack guard to incorrectly succeed. We explicitly + // guard against underflow. // - // Preemption sets stackguard to StackPreempt, a very large value. - // That breaks the math above, so we have to check for that explicitly. - // MOVQ stackguard, SI - // CMPQ SI, $StackPreempt - // JEQ label-of-call-to-morestack - // LEAQ StackGuard(SP), AX - // SUBQ SI, AX - // CMPQ AX, $(framesize+(StackGuard-StackSmall)) + // MOVQ SP, tmp + // SUBQ $(framesize - StackSmall), tmp + // // If subtraction wrapped (carry set), morestack. 
+ // JCS label-of-call-to-morestack + // CMPQ tmp, stackguard p = obj.Appendp(p, newprog) p.As = mov - p.From.Type = obj.TYPE_MEM - p.From.Reg = rg - p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 - if cursym.CFunc() { - p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 - } + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_SP p.To.Type = obj.TYPE_REG - p.To.Reg = REG_SI + p.To.Reg = tmp p = ctxt.StartUnsafePoint(p, newprog) // see the comment above p = obj.Appendp(p, newprog) - p.As = cmp - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_SI - p.To.Type = obj.TYPE_CONST - p.To.Offset = objabi.StackPreempt - if ctxt.Arch.Family == sys.I386 { - p.To.Offset = int64(uint32(objabi.StackPreempt & (1<<32 - 1))) - } + p.As = sub + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(framesize) - objabi.StackSmall + p.To.Type = obj.TYPE_REG + p.To.Reg = tmp p = obj.Appendp(p, newprog) - p.As = AJEQ + p.As = AJCS p.To.Type = obj.TYPE_BRANCH q1 = p - p = obj.Appendp(p, newprog) - p.As = lea - p.From.Type = obj.TYPE_MEM - p.From.Reg = REG_SP - p.From.Offset = int64(objabi.StackGuard) - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_AX - - p = obj.Appendp(p, newprog) - p.As = sub - p.From.Type = obj.TYPE_REG - p.From.Reg = REG_SI - p.To.Type = obj.TYPE_REG - p.To.Reg = REG_AX - p = obj.Appendp(p, newprog) p.As = cmp p.From.Type = obj.TYPE_REG - p.From.Reg = REG_AX - p.To.Type = obj.TYPE_CONST - p.To.Offset = int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall) + p.From.Reg = tmp + p.To.Type = obj.TYPE_MEM + p.To.Reg = rg + p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 + if cursym.CFunc() { + p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 + } } // common @@ -1139,7 +1142,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA pcdata := ctxt.EmitEntryStackMap(cursym, spfix, newprog) spill := ctxt.StartUnsafePoint(pcdata, newprog) - pcdata = ctxt.SpillRegisterArgs(spill, newprog) + pcdata = cursym.Func().SpillRegisterArgs(spill, newprog) call := obj.Appendp(pcdata, newprog) call.Pos = cursym.Func().Text.Pos @@ -1164,7 +1167,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA progedit(ctxt, callend.Link, newprog) } - pcdata = ctxt.UnspillRegisterArgs(callend, newprog) + pcdata = cursym.Func().UnspillRegisterArgs(callend, newprog) pcdata = ctxt.EndUnsafePoint(pcdata, newprog, -1) jmp := obj.Appendp(pcdata, newprog) @@ -1181,6 +1184,114 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA return end } +func isR15(r int16) bool { + return r == REG_R15 || r == REG_R15B +} +func addrMentionsR15(a *obj.Addr) bool { + if a == nil { + return false + } + return isR15(a.Reg) || isR15(a.Index) +} +func progMentionsR15(p *obj.Prog) bool { + return addrMentionsR15(&p.From) || addrMentionsR15(&p.To) || isR15(p.Reg) || addrMentionsR15(p.GetFrom3()) +} + +// progOverwritesR15 reports whether p writes to R15 and does not depend on +// the previous value of R15. +func progOverwritesR15(p *obj.Prog) bool { + if !(p.To.Type == obj.TYPE_REG && isR15(p.To.Reg)) { + // Not writing to R15. + return false + } + if (p.As == AXORL || p.As == AXORQ) && p.From.Type == obj.TYPE_REG && isR15(p.From.Reg) { + // These look like uses of R15, but aren't, so we must detect these + // before the use check below. 
+ return true + } + if addrMentionsR15(&p.From) || isR15(p.Reg) || addrMentionsR15(p.GetFrom3()) { + // use before overwrite + return false + } + if p.As == AMOVL || p.As == AMOVQ || p.As == APOPQ { + return true + // TODO: MOVB might be ok if we only ever use R15B. + } + return false +} + +func addrUsesGlobal(a *obj.Addr) bool { + if a == nil { + return false + } + return a.Name == obj.NAME_EXTERN && !a.Sym.Local() +} +func progUsesGlobal(p *obj.Prog) bool { + if p.As == obj.ACALL || p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP { + // These opcodes don't use a GOT to access their argument (see rewriteToUseGot), + // or R15 would be dead at them anyway. + return false + } + if p.As == ALEAQ { + // The GOT entry is placed directly in the destination register; R15 is not used. + return false + } + return addrUsesGlobal(&p.From) || addrUsesGlobal(&p.To) || addrUsesGlobal(p.GetFrom3()) +} + +func errorCheck(ctxt *obj.Link, s *obj.LSym) { + // When dynamic linking, R15 is used to access globals. Reject code that + // uses R15 after a global variable access. + if !ctxt.Flag_dynlink { + return + } + + // Flood fill all the instructions where R15's value is junk. + // If there are any uses of R15 in that set, report an error. + var work []*obj.Prog + var mentionsR15 bool + for p := s.Func().Text; p != nil; p = p.Link { + if progUsesGlobal(p) { + work = append(work, p) + p.Mark |= markBit + } + if progMentionsR15(p) { + mentionsR15 = true + } + } + if mentionsR15 { + for len(work) > 0 { + p := work[len(work)-1] + work = work[:len(work)-1] + if q := p.To.Target(); q != nil && q.Mark&markBit == 0 { + q.Mark |= markBit + work = append(work, q) + } + if p.As == obj.AJMP || p.As == obj.ARET { + continue // no fallthrough + } + if progMentionsR15(p) { + if progOverwritesR15(p) { + // R15 is overwritten by this instruction. Its value is not junk any more. + continue + } + pos := ctxt.PosTable.Pos(p.Pos) + ctxt.Diag("%s:%s: when dynamic linking, R15 is clobbered by a global variable access and is used here: %v", path.Base(pos.Filename()), pos.LineNumber(), p) + break // only report one error + } + if q := p.Link; q != nil && q.Mark&markBit == 0 { + q.Mark |= markBit + work = append(work, q) + } + } + } + + // Clean up. + for p := s.Func().Text; p != nil; p = p.Link { + p.Mark &^= markBit + } +} + var unaryDst = map[obj.As]bool{ ABSWAPL: true, ABSWAPQ: true, @@ -1269,6 +1380,7 @@ var unaryDst = map[obj.As]bool{ var Linkamd64 = obj.LinkArch{ Arch: sys.ArchAMD64, Init: instinit, + ErrorCheck: errorCheck, Preprocess: preprocess, Assemble: span6, Progedit: progedit, diff --git a/src/cmd/internal/objabi/flag.go b/src/cmd/internal/objabi/flag.go index 3fd73f3c576..e41fc570b08 100644 --- a/src/cmd/internal/objabi/flag.go +++ b/src/cmd/internal/objabi/flag.go @@ -8,6 +8,7 @@ import ( "bytes" "flag" "fmt" + "internal/buildcfg" "io" "io/ioutil" "log" @@ -91,16 +92,18 @@ func (versionFlag) Set(s string) error { name = name[strings.LastIndex(name, `\`)+1:] name = strings.TrimSuffix(name, ".exe") - // If there's an active experiment, include that, - // to distinguish go1.10.2 with an experiment - // from go1.10.2 without an experiment. - p := Expstring() - if p == DefaultExpstring() { - p = "" - } - sep := "" - if p != "" { - sep = " " + p := "" + + if s == "goexperiment" { + // test/run.go uses this to discover the full set of + // experiment tags. Report everything. 
+ p = " X:" + strings.Join(buildcfg.AllExperiments(), ",") + } else { + // If the enabled experiments differ from the defaults, + // include that difference. + if goexperiment := buildcfg.GOEXPERIMENT(); goexperiment != "" { + p = " X:" + goexperiment + } } // The go command invokes -V=full to get a unique identifier @@ -109,12 +112,12 @@ func (versionFlag) Set(s string) error { // build ID of the binary, so that if the compiler is changed and // rebuilt, we notice and rebuild all packages. if s == "full" { - if strings.HasPrefix(Version, "devel") { + if strings.HasPrefix(buildcfg.Version, "devel") { p += " buildID=" + buildID } } - fmt.Printf("%s version %s%s%s\n", name, Version, sep, p) + fmt.Printf("%s version %s%s\n", name, buildcfg.Version, p) os.Exit(0) return nil } diff --git a/src/cmd/internal/objabi/funcdata.go b/src/cmd/internal/objabi/funcdata.go index faa2863325d..4ff0ebe13d8 100644 --- a/src/cmd/internal/objabi/funcdata.go +++ b/src/cmd/internal/objabi/funcdata.go @@ -20,6 +20,7 @@ const ( FUNCDATA_StackObjects = 2 FUNCDATA_InlTree = 3 FUNCDATA_OpenCodedDeferInfo = 4 + FUNCDATA_ArgInfo = 5 // ArgsSizeUnknown is set in Func.argsize to mark all functions // whose argument size is unknown (C vararg functions, and diff --git a/src/cmd/internal/objabi/funcid.go b/src/cmd/internal/objabi/funcid.go index 6e188e31bb4..93ebd7be943 100644 --- a/src/cmd/internal/objabi/funcid.go +++ b/src/cmd/internal/objabi/funcid.go @@ -24,11 +24,11 @@ type FuncID uint8 // Note: this list must match the list in runtime/symtab.go. const ( FuncID_normal FuncID = iota // not a special function + FuncID_abort FuncID_asmcgocall FuncID_asyncPreempt FuncID_cgocallback - FuncID_debugCallV1 - FuncID_externalthreadhandler + FuncID_debugCallV2 FuncID_gcBgMarkWorker FuncID_goexit FuncID_gogo @@ -49,32 +49,33 @@ const ( ) var funcIDs = map[string]FuncID{ - "asmcgocall": FuncID_asmcgocall, - "asyncPreempt": FuncID_asyncPreempt, - "cgocallback": FuncID_cgocallback, - "debugCallV1": FuncID_debugCallV1, - "externalthreadhandler": FuncID_externalthreadhandler, - "gcBgMarkWorker": FuncID_gcBgMarkWorker, - "go": FuncID_rt0_go, - "goexit": FuncID_goexit, - "gogo": FuncID_gogo, - "gopanic": FuncID_gopanic, - "handleAsyncEvent": FuncID_handleAsyncEvent, - "jmpdefer": FuncID_jmpdefer, - "main": FuncID_runtime_main, - "mcall": FuncID_mcall, - "morestack": FuncID_morestack, - "mstart": FuncID_mstart, - "panicwrap": FuncID_panicwrap, - "runfinq": FuncID_runfinq, - "sigpanic": FuncID_sigpanic, - "switch": FuncID_systemstack_switch, - "systemstack": FuncID_systemstack, + "abort": FuncID_abort, + "asmcgocall": FuncID_asmcgocall, + "asyncPreempt": FuncID_asyncPreempt, + "cgocallback": FuncID_cgocallback, + "debugCallV2": FuncID_debugCallV2, + "gcBgMarkWorker": FuncID_gcBgMarkWorker, + "go": FuncID_rt0_go, + "goexit": FuncID_goexit, + "gogo": FuncID_gogo, + "gopanic": FuncID_gopanic, + "handleAsyncEvent": FuncID_handleAsyncEvent, + "jmpdefer": FuncID_jmpdefer, + "main": FuncID_runtime_main, + "mcall": FuncID_mcall, + "morestack": FuncID_morestack, + "mstart": FuncID_mstart, + "panicwrap": FuncID_panicwrap, + "runfinq": FuncID_runfinq, + "sigpanic": FuncID_sigpanic, + "switch": FuncID_systemstack_switch, + "systemstack": FuncID_systemstack, // Don't show in call stack but otherwise not special. "deferreturn": FuncID_wrapper, "runOpenDeferFrame": FuncID_wrapper, "reflectcallSave": FuncID_wrapper, + "deferCallSave": FuncID_wrapper, } // Get the function ID for the named function in the named file. 
diff --git a/src/cmd/internal/objabi/line.go b/src/cmd/internal/objabi/line.go index 0733b65138d..0b1e0bb181c 100644 --- a/src/cmd/internal/objabi/line.go +++ b/src/cmd/internal/objabi/line.go @@ -5,6 +5,7 @@ package objabi import ( + "internal/buildcfg" "os" "path/filepath" "strings" @@ -38,8 +39,8 @@ func AbsFile(dir, file, rewrites string) string { } abs, rewritten := ApplyRewrites(abs, rewrites) - if !rewritten && hasPathPrefix(abs, GOROOT) { - abs = "$GOROOT" + abs[len(GOROOT):] + if !rewritten && hasPathPrefix(abs, buildcfg.GOROOT) { + abs = "$GOROOT" + abs[len(buildcfg.GOROOT):] } if abs == "" { diff --git a/src/cmd/internal/objabi/path.go b/src/cmd/internal/objabi/path.go index 1a0784cf7f0..aacab9a0ca7 100644 --- a/src/cmd/internal/objabi/path.go +++ b/src/cmd/internal/objabi/path.go @@ -47,6 +47,8 @@ func PathToPrefix(s string) string { // some cases need to be aware of when they are building such a // package, for example to enable features such as ABI selectors in // assembly sources. +// +// Keep in sync with cmd/dist/build.go:IsRuntimePackagePath. func IsRuntimePackagePath(pkgpath string) bool { rval := false switch pkgpath { @@ -56,7 +58,7 @@ func IsRuntimePackagePath(pkgpath string) bool { rval = true case "syscall": rval = true - case "crypto/x509/internal/macos": // libc function wrappers need to be ABIInternal + case "internal/bytealg": rval = true default: rval = strings.HasPrefix(pkgpath, "runtime/internal") diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go index 649f6901944..ea55fa3b0ad 100644 --- a/src/cmd/internal/objabi/reloctype.go +++ b/src/cmd/internal/objabi/reloctype.go @@ -50,11 +50,6 @@ const ( // R_ADDROFF resolves to a 32-bit offset from the beginning of the section // holding the data being relocated to the referenced symbol. R_ADDROFF - // R_WEAKADDROFF resolves just like R_ADDROFF but is a weak relocation. - // A weak relocation does not make the symbol it refers to reachable, - // and is only honored by the linker if the symbol is in some other way - // reachable. - R_WEAKADDROFF R_SIZE R_CALL R_CALLARM @@ -172,8 +167,8 @@ const ( // R_POWER_TLS_LE is used to implement the "local exec" model for tls // access. It resolves to the offset of the thread-local symbol from the - // thread pointer (R13) and inserts this value into the low 16 bits of an - // instruction word. + // thread pointer (R13) and is split across a pair of instructions to + // support a 32-bit displacement. R_POWER_TLS_LE // R_POWER_TLS_IE is used to implement the "initial exec" model for tls access. It @@ -183,10 +178,12 @@ const ( // symbol from the thread pointer (R13)). R_POWER_TLS_IE - // R_POWER_TLS marks an X-form instruction such as "MOVD 0(R13)(R31*1), g" as - // accessing a particular thread-local symbol. It does not affect code generation - // but is used by the system linker when relaxing "initial exec" model code to - // "local exec" model code. + // R_POWER_TLS marks an X-form instruction such as "ADD R3,R13,R4" as completing + // a sequence of GOT-relative relocations to compute a TLS address. This can be + // used by the system linker to rewrite the GOT-relative TLS relocation into a + // simpler thread-pointer relative relocation. See tables 3.26 and 3.28 in the + // ppc64 ELFv2 1.4 ABI for details on this transformation. Likewise, the second argument + // (usually called RB in X-form instructions) is assumed to be R13.
R_POWER_TLS // R_ADDRPOWER_DS is similar to R_ADDRPOWER above, but assumes the second @@ -256,6 +253,15 @@ const ( // of a symbol. This isn't a real relocation, it can be placed in anywhere // in a symbol and target any symbols. R_XCOFFREF + + // R_WEAK marks the relocation as a weak reference. + // A weak relocation does not make the symbol it refers to reachable, + // and is only honored by the linker if the symbol is in some other way + // reachable. + R_WEAK = -1 << 15 + + R_WEAKADDR = R_WEAK | R_ADDR + R_WEAKADDROFF = R_WEAK | R_ADDROFF ) // IsDirectCall reports whether r is a relocation for a direct call. diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go index 658a44f8b81..8882d19f883 100644 --- a/src/cmd/internal/objabi/reloctype_string.go +++ b/src/cmd/internal/objabi/reloctype_string.go @@ -13,66 +13,65 @@ func _() { _ = x[R_ADDRARM64-3] _ = x[R_ADDRMIPS-4] _ = x[R_ADDROFF-5] - _ = x[R_WEAKADDROFF-6] - _ = x[R_SIZE-7] - _ = x[R_CALL-8] - _ = x[R_CALLARM-9] - _ = x[R_CALLARM64-10] - _ = x[R_CALLIND-11] - _ = x[R_CALLPOWER-12] - _ = x[R_CALLMIPS-13] - _ = x[R_CALLRISCV-14] - _ = x[R_CONST-15] - _ = x[R_PCREL-16] - _ = x[R_TLS_LE-17] - _ = x[R_TLS_IE-18] - _ = x[R_GOTOFF-19] - _ = x[R_PLT0-20] - _ = x[R_PLT1-21] - _ = x[R_PLT2-22] - _ = x[R_USEFIELD-23] - _ = x[R_USETYPE-24] - _ = x[R_USEIFACE-25] - _ = x[R_USEIFACEMETHOD-26] - _ = x[R_METHODOFF-27] - _ = x[R_POWER_TOC-28] - _ = x[R_GOTPCREL-29] - _ = x[R_JMPMIPS-30] - _ = x[R_DWARFSECREF-31] - _ = x[R_DWARFFILEREF-32] - _ = x[R_ARM64_TLS_LE-33] - _ = x[R_ARM64_TLS_IE-34] - _ = x[R_ARM64_GOTPCREL-35] - _ = x[R_ARM64_GOT-36] - _ = x[R_ARM64_PCREL-37] - _ = x[R_ARM64_LDST8-38] - _ = x[R_ARM64_LDST16-39] - _ = x[R_ARM64_LDST32-40] - _ = x[R_ARM64_LDST64-41] - _ = x[R_ARM64_LDST128-42] - _ = x[R_POWER_TLS_LE-43] - _ = x[R_POWER_TLS_IE-44] - _ = x[R_POWER_TLS-45] - _ = x[R_ADDRPOWER_DS-46] - _ = x[R_ADDRPOWER_GOT-47] - _ = x[R_ADDRPOWER_PCREL-48] - _ = x[R_ADDRPOWER_TOCREL-49] - _ = x[R_ADDRPOWER_TOCREL_DS-50] - _ = x[R_RISCV_PCREL_ITYPE-51] - _ = x[R_RISCV_PCREL_STYPE-52] - _ = x[R_RISCV_TLS_IE_ITYPE-53] - _ = x[R_RISCV_TLS_IE_STYPE-54] - _ = x[R_PCRELDBL-55] - _ = x[R_ADDRMIPSU-56] - _ = x[R_ADDRMIPSTLS-57] - _ = x[R_ADDRCUOFF-58] - _ = x[R_WASMIMPORT-59] - _ = x[R_XCOFFREF-60] + _ = x[R_SIZE-6] + _ = x[R_CALL-7] + _ = x[R_CALLARM-8] + _ = x[R_CALLARM64-9] + _ = x[R_CALLIND-10] + _ = x[R_CALLPOWER-11] + _ = x[R_CALLMIPS-12] + _ = x[R_CALLRISCV-13] + _ = x[R_CONST-14] + _ = x[R_PCREL-15] + _ = x[R_TLS_LE-16] + _ = x[R_TLS_IE-17] + _ = x[R_GOTOFF-18] + _ = x[R_PLT0-19] + _ = x[R_PLT1-20] + _ = x[R_PLT2-21] + _ = x[R_USEFIELD-22] + _ = x[R_USETYPE-23] + _ = x[R_USEIFACE-24] + _ = x[R_USEIFACEMETHOD-25] + _ = x[R_METHODOFF-26] + _ = x[R_POWER_TOC-27] + _ = x[R_GOTPCREL-28] + _ = x[R_JMPMIPS-29] + _ = x[R_DWARFSECREF-30] + _ = x[R_DWARFFILEREF-31] + _ = x[R_ARM64_TLS_LE-32] + _ = x[R_ARM64_TLS_IE-33] + _ = x[R_ARM64_GOTPCREL-34] + _ = x[R_ARM64_GOT-35] + _ = x[R_ARM64_PCREL-36] + _ = x[R_ARM64_LDST8-37] + _ = x[R_ARM64_LDST16-38] + _ = x[R_ARM64_LDST32-39] + _ = x[R_ARM64_LDST64-40] + _ = x[R_ARM64_LDST128-41] + _ = x[R_POWER_TLS_LE-42] + _ = x[R_POWER_TLS_IE-43] + _ = x[R_POWER_TLS-44] + _ = x[R_ADDRPOWER_DS-45] + _ = x[R_ADDRPOWER_GOT-46] + _ = x[R_ADDRPOWER_PCREL-47] + _ = x[R_ADDRPOWER_TOCREL-48] + _ = x[R_ADDRPOWER_TOCREL_DS-49] + _ = x[R_RISCV_PCREL_ITYPE-50] + _ = x[R_RISCV_PCREL_STYPE-51] + _ = x[R_RISCV_TLS_IE_ITYPE-52] + _ = x[R_RISCV_TLS_IE_STYPE-53] + _ = x[R_PCRELDBL-54] + _ = 
x[R_ADDRMIPSU-55] + _ = x[R_ADDRMIPSTLS-56] + _ = x[R_ADDRCUOFF-57] + _ = x[R_WASMIMPORT-58] + _ = x[R_XCOFFREF-59] } -const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_WEAKADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" +const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" -var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 60, 66, 72, 81, 92, 101, 112, 122, 133, 140, 147, 155, 163, 171, 177, 183, 189, 199, 208, 218, 234, 245, 256, 266, 275, 288, 302, 316, 330, 346, 357, 370, 383, 397, 411, 425, 440, 454, 468, 479, 493, 508, 525, 543, 564, 583, 602, 622, 642, 652, 663, 676, 687, 699, 709} +var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 53, 59, 68, 79, 88, 99, 109, 120, 127, 134, 142, 150, 158, 164, 170, 176, 186, 195, 205, 221, 232, 243, 253, 262, 275, 289, 303, 317, 333, 344, 357, 370, 384, 398, 412, 427, 441, 455, 466, 480, 495, 512, 530, 551, 570, 589, 609, 629, 639, 650, 663, 674, 686, 696} func (i RelocType) String() string { i -= 1 diff --git a/src/cmd/internal/objabi/stack.go b/src/cmd/internal/objabi/stack.go index 05a1d4a4b58..0c82a7c6dd1 100644 --- a/src/cmd/internal/objabi/stack.go +++ b/src/cmd/internal/objabi/stack.go @@ -4,6 +4,8 @@ package objabi +import "internal/buildcfg" + // For the linkers. Must match Go definitions. const ( @@ -13,10 +15,6 @@ const ( StackSmall = 128 ) -const ( - StackPreempt = -1314 // 0xfff...fade -) - // Initialize StackGuard and StackLimit according to target system. var StackGuard = 928*stackGuardMultiplier() + StackSystem var StackLimit = StackGuard - StackSystem - StackSmall @@ -26,7 +24,7 @@ var StackLimit = StackGuard - StackSystem - StackSmall // builds that have larger stack frames or for specific targets. func stackGuardMultiplier() int { // On AIX, a larger stack is needed for syscalls. 
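The StackPreempt constant removed above can go away because the rewritten amd64 prologue earlier in this change no longer special-cases preemption: the runtime poisons stackguard0 with a huge value, and the ordinary unsigned comparison then fires on its own. Below is a minimal Go sketch of the new huge-frame check, with assumed names and constants:

```go
package main

import "fmt"

const stackSmall = 128 // objabi.StackSmall

// needsMorestack sketches the rewritten huge-frame check:
//	MOVQ SP, tmp
//	SUBQ $(framesize-StackSmall), tmp
//	JCS  morestack              // the subtraction wrapped below zero
//	CMPQ tmp, stackguard0
//	JLS  morestack              // tmp <= stackguard0
// A preemption request sets stackguard0 to a huge value, so the plain
// comparison fires and no separate StackPreempt constant is needed.
func needsMorestack(sp, stackguard0, framesize uintptr) bool {
	tmp := sp - (framesize - stackSmall)
	if tmp > sp { // borrow: SP-(framesize-StackSmall) underflowed
		return true
	}
	return tmp <= stackguard0
}

func main() {
	const preempt = ^uintptr(0) - 1313 // the old 0xfff...fade poison value
	fmt.Println(needsMorestack(1<<20, 1<<12, 1<<16))   // false: room to spare
	fmt.Println(needsMorestack(1<<20, preempt, 1<<16)) // true: preempt requested
	fmt.Println(needsMorestack(1<<12, 1<<12, 1<<16))   // true: would underflow
}
```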
- if GOOS == "aix" { + if buildcfg.GOOS == "aix" { return 2 } return stackGuardMultiplierDefault diff --git a/src/cmd/internal/objabi/util.go b/src/cmd/internal/objabi/util.go index 1f99f8ed5d6..63640950d9f 100644 --- a/src/cmd/internal/objabi/util.go +++ b/src/cmd/internal/objabi/util.go @@ -6,32 +6,9 @@ package objabi import ( "fmt" - "log" - "os" "strings" -) -func envOr(key, value string) string { - if x := os.Getenv(key); x != "" { - return x - } - return value -} - -var ( - defaultGOROOT string // set by linker - - GOROOT = envOr("GOROOT", defaultGOROOT) - GOARCH = envOr("GOARCH", defaultGOARCH) - GOOS = envOr("GOOS", defaultGOOS) - GO386 = envOr("GO386", defaultGO386) - GOARM = goarm() - GOMIPS = gomips() - GOMIPS64 = gomips64() - GOPPC64 = goppc64() - GOWASM = gowasm() - GO_LDSO = defaultGO_LDSO - Version = version + "internal/buildcfg" ) const ( @@ -39,163 +16,10 @@ const ( MachoRelocOffset = 2048 // reserve enough space for ELF relocations ) -func goarm() int { - def := defaultGOARM - if GOOS == "android" && GOARCH == "arm" { - // Android arm devices always support GOARM=7. - def = "7" - } - switch v := envOr("GOARM", def); v { - case "5": - return 5 - case "6": - return 6 - case "7": - return 7 - } - // Fail here, rather than validate at multiple call sites. - log.Fatalf("Invalid GOARM value. Must be 5, 6, or 7.") - panic("unreachable") -} - -func gomips() string { - switch v := envOr("GOMIPS", defaultGOMIPS); v { - case "hardfloat", "softfloat": - return v - } - log.Fatalf("Invalid GOMIPS value. Must be hardfloat or softfloat.") - panic("unreachable") -} - -func gomips64() string { - switch v := envOr("GOMIPS64", defaultGOMIPS64); v { - case "hardfloat", "softfloat": - return v - } - log.Fatalf("Invalid GOMIPS64 value. Must be hardfloat or softfloat.") - panic("unreachable") -} - -func goppc64() int { - switch v := envOr("GOPPC64", defaultGOPPC64); v { - case "power8": - return 8 - case "power9": - return 9 - } - log.Fatalf("Invalid GOPPC64 value. Must be power8 or power9.") - panic("unreachable") -} - -type gowasmFeatures struct { - SignExt bool - SatConv bool -} - -func (f gowasmFeatures) String() string { - var flags []string - if f.SatConv { - flags = append(flags, "satconv") - } - if f.SignExt { - flags = append(flags, "signext") - } - return strings.Join(flags, ",") -} - -func gowasm() (f gowasmFeatures) { - for _, opt := range strings.Split(envOr("GOWASM", ""), ",") { - switch opt { - case "satconv": - f.SatConv = true - case "signext": - f.SignExt = true - case "": - // ignore - default: - log.Fatalf("Invalid GOWASM value. No such feature: " + opt) - } - } - return -} - -func Getgoextlinkenabled() string { - return envOr("GO_EXTLINK_ENABLED", defaultGO_EXTLINK_ENABLED) -} - -func init() { - for _, f := range strings.Split(goexperiment, ",") { - if f != "" { - addexp(f) - } - } - - // regabi is only supported on amd64. - if GOARCH != "amd64" { - Regabi_enabled = 0 - } -} - -// Note: must agree with runtime.framepointer_enabled. -var Framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64" - -func addexp(s string) { - // Could do general integer parsing here, but the runtime copy doesn't yet. 
- v := 1 - name := s - if len(name) > 2 && name[:2] == "no" { - v = 0 - name = name[2:] - } - for i := 0; i < len(exper); i++ { - if exper[i].name == name { - if exper[i].val != nil { - *exper[i].val = v - } - return - } - } - - fmt.Printf("unknown experiment %s\n", s) - os.Exit(2) -} - -var ( - Fieldtrack_enabled int - Preemptibleloops_enabled int - Staticlockranking_enabled int - Regabi_enabled int -) - -// Toolchain experiments. -// These are controlled by the GOEXPERIMENT environment -// variable recorded when the toolchain is built. -// This list is also known to cmd/gc. -var exper = []struct { - name string - val *int -}{ - {"fieldtrack", &Fieldtrack_enabled}, - {"preemptibleloops", &Preemptibleloops_enabled}, - {"staticlockranking", &Staticlockranking_enabled}, - {"regabi", &Regabi_enabled}, -} - -var defaultExpstring = Expstring() - -func DefaultExpstring() string { - return defaultExpstring -} - -func Expstring() string { - buf := "X" - for i := range exper { - if *exper[i].val != 0 { - buf += "," + exper[i].name - } - } - if buf == "X" { - buf += ",none" - } - return "X:" + buf[2:] +// HeaderString returns the toolchain configuration string written in +// Go object headers. This string ensures we don't attempt to import +// or link object files that are incompatible with each other. This +// string always starts with "go object ". +func HeaderString() string { + return fmt.Sprintf("go object %s %s %s X:%s\n", buildcfg.GOOS, buildcfg.GOARCH, buildcfg.Version, strings.Join(buildcfg.EnabledExperiments(), ",")) } diff --git a/src/cmd/internal/objfile/goobj.go b/src/cmd/internal/objfile/goobj.go index f19bec5dcb2..dd21d223511 100644 --- a/src/cmd/internal/objfile/goobj.go +++ b/src/cmd/internal/objfile/goobj.go @@ -168,7 +168,7 @@ func (f *goobjFile) symbols() ([]Sym, error) { code = 'T' case objabi.SRODATA: code = 'R' - case objabi.SDATA: + case objabi.SNOPTRDATA, objabi.SDATA: code = 'D' case objabi.SBSS, objabi.SNOPTRBSS, objabi.STLSBSS: code = 'B' diff --git a/src/cmd/internal/objfile/objfile.go b/src/cmd/internal/objfile/objfile.go index a58e0e159c8..dcfd158ec20 100644 --- a/src/cmd/internal/objfile/objfile.go +++ b/src/cmd/internal/objfile/objfile.go @@ -6,6 +6,7 @@ package objfile import ( + "cmd/internal/archive" "debug/dwarf" "debug/gosym" "fmt" @@ -73,6 +74,8 @@ func Open(name string) (*File, error) { } if f, err := openGoFile(r); err == nil { return f, nil + } else if _, ok := err.(archive.ErrGoObjOtherVersion); ok { + return nil, fmt.Errorf("open %s: %v", name, err) } for _, try := range openers { if raw, err := try(r); err == nil { diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go index ef7c017bd4a..fa477b837ff 100644 --- a/src/cmd/internal/sys/supported.go +++ b/src/cmd/internal/sys/supported.go @@ -15,7 +15,7 @@ func RaceDetectorSupported(goos, goarch string) bool { return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64" case "darwin": return goarch == "amd64" || goarch == "arm64" - case "freebsd", "netbsd", "windows": + case "freebsd", "netbsd", "openbsd", "windows": return goarch == "amd64" default: return false @@ -23,7 +23,8 @@ func RaceDetectorSupported(goos, goarch string) bool { } // MSanSupported reports whether goos/goarch supports the memory -// sanitizer option. There is a copy of this function in cmd/dist/test.go. +// sanitizer option. +// There is a copy of this function in misc/cgo/testsanitizers/cc_test.go. 
func MSanSupported(goos, goarch string) bool { switch goos { case "linux": diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go index db710bed6ab..0419613cbe2 100644 --- a/src/cmd/link/dwarf_test.go +++ b/src/cmd/link/dwarf_test.go @@ -10,7 +10,6 @@ import ( "cmd/internal/objfile" "debug/dwarf" "internal/testenv" - "io/ioutil" "os" "os/exec" "path" @@ -59,11 +58,7 @@ func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string) t.Run(prog, func(t *testing.T) { t.Parallel() - tmpDir, err := ioutil.TempDir("", "go-link-TestDWARF") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() exe := filepath.Join(tmpDir, prog+".exe") dir := "../../runtime/testdata/" + prog @@ -91,7 +86,8 @@ func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string) exe = filepath.Join(tmpDir, "go.o") } - if runtime.GOOS == "darwin" { + darwinSymbolTestIsTooFlaky := true // Turn this off, it is too flaky -- See #32218 + if runtime.GOOS == "darwin" && !darwinSymbolTestIsTooFlaky { if _, err = exec.LookPath("symbols"); err == nil { // Ensure Apple's tooling can parse our object for symbols. out, err = exec.Command("symbols", exe).CombinedOutput() diff --git a/src/cmd/link/elf_test.go b/src/cmd/link/elf_test.go index 20754d09f5a..012c0b51696 100644 --- a/src/cmd/link/elf_test.go +++ b/src/cmd/link/elf_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build dragonfly || freebsd || linux || netbsd || openbsd // +build dragonfly freebsd linux netbsd openbsd package main @@ -69,11 +70,7 @@ func TestSectionsWithSameName(t *testing.T) { t.Skipf("can't find objcopy: %v", err) } - dir, err := ioutil.TempDir("", "go-link-TestSectionsWithSameName") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() gopath := filepath.Join(dir, "GOPATH") env := append(os.Environ(), "GOPATH="+gopath) @@ -143,11 +140,7 @@ func TestMinusRSymsWithSameName(t *testing.T) { testenv.MustHaveCGO(t) t.Parallel() - dir, err := ioutil.TempDir("", "go-link-TestMinusRSymsWithSameName") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() gopath := filepath.Join(dir, "GOPATH") env := append(os.Environ(), "GOPATH="+gopath) @@ -270,11 +263,7 @@ func TestPIESize(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - dir, err := ioutil.TempDir("", "go-link-"+name) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() writeGo(t, dir) diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go index 2d09a6160aa..fb960491de7 100644 --- a/src/cmd/link/internal/amd64/asm.go +++ b/src/cmd/link/internal/amd64/asm.go @@ -548,7 +548,7 @@ func archreloc(*ld.Target, *loader.Loader, *ld.ArchSyms, loader.Reloc, loader.Sy return -1, 0, false } -func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64) int64 { +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { log.Fatalf("unexpected relocation variant") return -1 } diff --git a/src/cmd/link/internal/arm/asm.go b/src/cmd/link/internal/arm/asm.go index 03caeae7bec..ab780214bb6 100644 --- a/src/cmd/link/internal/arm/asm.go +++ b/src/cmd/link/internal/arm/asm.go @@ -111,7 +111,7 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade return false } - // Handle relocations found in ELF object files. 
+ // Handle relocations found in ELF object files. case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_PLT32): su := ldr.MakeSymbolUpdater(s) su.SetRelocType(rIdx, objabi.R_CALLARM) @@ -237,6 +237,21 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade su.SetRelocSym(rIdx, 0) return true } + + case objabi.R_GOTPCREL: + if target.IsExternal() { + // External linker will do this relocation. + return true + } + if targType != sym.SDYNIMPORT { + ldr.Errorf(s, "R_GOTPCREL target is not SDYNIMPORT symbol: %v", ldr.SymName(targ)) + } + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_ARM_GLOB_DAT)) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ))) + return true } return false @@ -369,6 +384,12 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { relocs := ldr.Relocs(s) r := relocs.At(ri) switch r.Type() { + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_CALL), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_PC24), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_JUMP24): + // Host object relocations that will be turned into a PLT call. + // The PLT may be too far. Insert a trampoline for them. + fallthrough case objabi.R_CALLARM: var t int64 // ldr.SymValue(rs) == 0 indicates a cross-package jump to a function that is not yet @@ -415,7 +436,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { // trampoline does not exist, create one trampb := ldr.MakeSymbolUpdater(tramp) ctxt.AddTramp(trampb) - if ctxt.DynlinkingGo() { + if ctxt.DynlinkingGo() || ldr.SymType(rs) == sym.SDYNIMPORT { if immrot(uint32(offset)) == 0 { ctxt.Errorf(s, "odd offset in dynlink direct call: %v+%d", ldr.SymName(rs), offset) } @@ -443,8 +464,8 @@ func gentramp(arch *sys.Arch, linkmode ld.LinkMode, ldr *loader.Loader, tramp *l tramp.SetSize(12) // 3 instructions P := make([]byte, tramp.Size()) t := ldr.SymValue(target) + offset - o1 := uint32(0xe5900000 | 11<<12 | 15<<16) // MOVW (R15), R11 // R15 is actual pc + 8 - o2 := uint32(0xe12fff10 | 11) // JMP (R11) + o1 := uint32(0xe5900000 | 12<<12 | 15<<16) // MOVW (R15), R12 // R15 is actual pc + 8 + o2 := uint32(0xe12fff10 | 12) // JMP (R12) o3 := uint32(t) // WORD $target arch.ByteOrder.PutUint32(P, o1) arch.ByteOrder.PutUint32(P[4:], o2) @@ -464,9 +485,9 @@ func gentramp(arch *sys.Arch, linkmode ld.LinkMode, ldr *loader.Loader, tramp *l func gentramppic(arch *sys.Arch, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { tramp.SetSize(16) // 4 instructions P := make([]byte, tramp.Size()) - o1 := uint32(0xe5900000 | 11<<12 | 15<<16 | 4) // MOVW 4(R15), R11 // R15 is actual pc + 8 - o2 := uint32(0xe0800000 | 11<<12 | 15<<16 | 11) // ADD R15, R11, R11 - o3 := uint32(0xe12fff10 | 11) // JMP (R11) + o1 := uint32(0xe5900000 | 12<<12 | 15<<16 | 4) // MOVW 4(R15), R12 // R15 is actual pc + 8 + o2 := uint32(0xe0800000 | 12<<12 | 15<<16 | 12) // ADD R15, R12, R12 + o3 := uint32(0xe12fff10 | 12) // JMP (R12) o4 := uint32(0) // WORD $(target-pc) // filled in with relocation arch.ByteOrder.PutUint32(P, o1) arch.ByteOrder.PutUint32(P[4:], o2) @@ -484,10 +505,10 @@ func gentramppic(arch *sys.Arch, tramp *loader.SymbolBuilder, target loader.Sym, // generate a trampoline to target+offset in dynlink mode (using GOT) func gentrampdyn(arch *sys.Arch, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { tramp.SetSize(20) // 5 instructions - o1 := 
uint32(0xe5900000 | 11<<12 | 15<<16 | 8) // MOVW 8(R15), R11 // R15 is actual pc + 8 - o2 := uint32(0xe0800000 | 11<<12 | 15<<16 | 11) // ADD R15, R11, R11 - o3 := uint32(0xe5900000 | 11<<12 | 11<<16) // MOVW (R11), R11 - o4 := uint32(0xe12fff10 | 11) // JMP (R11) + o1 := uint32(0xe5900000 | 12<<12 | 15<<16 | 8) // MOVW 8(R15), R12 // R15 is actual pc + 8 + o2 := uint32(0xe0800000 | 12<<12 | 15<<16 | 12) // ADD R15, R12, R12 + o3 := uint32(0xe5900000 | 12<<12 | 12<<16) // MOVW (R12), R12 + o4 := uint32(0xe12fff10 | 12) // JMP (R12) o5 := uint32(0) // WORD $target@GOT // filled in with relocation o6 := uint32(0) if offset != 0 { @@ -495,8 +516,8 @@ func gentrampdyn(arch *sys.Arch, tramp *loader.SymbolBuilder, target loader.Sym, tramp.SetSize(24) // 6 instructions o6 = o5 o5 = o4 - o4 = 0xe2800000 | 11<<12 | 11<<16 | immrot(uint32(offset)) // ADD $offset, R11, R11 - o1 = uint32(0xe5900000 | 11<<12 | 15<<16 | 12) // MOVW 12(R15), R11 + o4 = 0xe2800000 | 12<<12 | 12<<16 | immrot(uint32(offset)) // ADD $offset, R12, R12 + o1 = uint32(0xe5900000 | 12<<12 | 15<<16 | 12) // MOVW 12(R15), R12 } P := make([]byte, tramp.Size()) arch.ByteOrder.PutUint32(P, o1) @@ -565,7 +586,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade return val, 0, false } -func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64) int64 { +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { log.Fatalf("unexpected relocation variant") return -1 } diff --git a/src/cmd/link/internal/arm/obj.go b/src/cmd/link/internal/arm/obj.go index fed8dce4de4..b7d149851c4 100644 --- a/src/cmd/link/internal/arm/obj.go +++ b/src/cmd/link/internal/arm/obj.go @@ -45,6 +45,7 @@ func Init() (*sys.Arch, ld.Arch) { Minalign: minAlign, Dwarfregsp: dwarfRegSP, Dwarfreglr: dwarfRegLR, + TrampLimit: 0x1c00000, // 24-bit signed offset * 4, leave room for PLT etc. Plan9Magic: 0x647, diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index 72093268c22..c10bdc4120a 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -413,6 +413,35 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade // (e.g. go version). return true } + + case objabi.R_ARM64_GOTPCREL: + if target.IsExternal() { + // External linker will do this relocation. + return true + } + if targType != sym.SDYNIMPORT { + ldr.Errorf(s, "R_ARM64_GOTPCREL target is not SDYNIMPORT symbol: %v", ldr.SymName(targ)) + } + if r.Add() != 0 { + ldr.Errorf(s, "R_ARM64_GOTPCREL with non-zero addend (%v)", r.Add()) + } + if target.IsElf() { + ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_AARCH64_GLOB_DAT)) + } else { + ld.AddGotSym(target, ldr, syms, targ, 0) + } + // turn into two relocations, one for each instruction. + su := ldr.MakeSymbolUpdater(s) + r.SetType(objabi.R_ARM64_GOT) + r.SetSiz(4) + r.SetSym(syms.GOT) + r.SetAdd(int64(ldr.SymGot(targ))) + r2, _ := su.AddRel(objabi.R_ARM64_GOT) + r2.SetSiz(4) + r2.SetOff(r.Off() + 4) + r2.SetSym(syms.GOT) + r2.SetAdd(int64(ldr.SymGot(targ))) + return true } return false } @@ -464,8 +493,9 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, return true } -// sign-extends from 24-bit. -func signext24(x int64) int64 { return x << 40 >> 40 } +// sign-extends from 21, 24-bit. 
+func signext21(x int64) int64 { return x << (64 - 21) >> (64 - 21) } +func signext24(x int64) int64 { return x << (64 - 24) >> (64 - 24) } func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { var v uint32 @@ -478,7 +508,7 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy if xadd != signext24(xadd) { // If the relocation target would overflow the addend, then target // a linker-manufactured label symbol with a smaller addend instead. - label := ldr.Lookup(machoLabelName(ldr, rs, xadd), ldr.SymVersion(rs)) + label := ldr.Lookup(offsetLabelName(ldr, rs, xadd/machoRelocLimit*machoRelocLimit), ldr.SymVersion(rs)) if label != 0 { xadd = ldr.SymValue(rs) + xadd - ldr.SymValue(label) rs = label @@ -569,35 +599,67 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy } func pereloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool { - var v uint32 - rs := r.Xsym rt := r.Type - if ldr.SymDynid(rs) < 0 { + if r.Xadd != signext21(r.Xadd) { + // If the relocation target would overflow the addend, then target + // a linker-manufactured label symbol with a smaller addend instead. + label := ldr.Lookup(offsetLabelName(ldr, rs, r.Xadd/peRelocLimit*peRelocLimit), ldr.SymVersion(rs)) + if label == 0 { + ldr.Errorf(s, "invalid relocation: %v %s+0x%x", rt, ldr.SymName(rs), r.Xadd) + return false + } + rs = label + } + if rt == objabi.R_CALLARM64 && r.Xadd != 0 { + label := ldr.Lookup(offsetLabelName(ldr, rs, r.Xadd), ldr.SymVersion(rs)) + if label == 0 { + ldr.Errorf(s, "invalid relocation: %v %s+0x%x", rt, ldr.SymName(rs), r.Xadd) + return false + } + rs = label + } + symdynid := ldr.SymDynid(rs) + if symdynid < 0 { ldr.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs)) return false } - out.Write32(uint32(sectoff)) - out.Write32(uint32(ldr.SymDynid(rs))) - switch rt { default: return false case objabi.R_DWARFSECREF: - v = ld.IMAGE_REL_ARM64_SECREL + out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_SECREL) case objabi.R_ADDR: + out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) if r.Size == 8 { - v = ld.IMAGE_REL_ARM64_ADDR64 + out.Write16(ld.IMAGE_REL_ARM64_ADDR64) } else { - v = ld.IMAGE_REL_ARM64_ADDR32 + out.Write16(ld.IMAGE_REL_ARM64_ADDR32) } - } - out.Write16(uint16(v)) + case objabi.R_ADDRARM64: + // Note: r.Xadd has been taken care of below, in archreloc. + out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_PAGEBASE_REL21) + + out.Write32(uint32(sectoff + 4)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_PAGEOFFSET_12A) + + case objabi.R_CALLARM64: + // Note: r.Xadd has been taken care of above, by using a label pointing into the middle of the function. + out.Write32(uint32(sectoff)) + out.Write32(uint32(symdynid)) + out.Write16(ld.IMAGE_REL_ARM64_BRANCH26) + } return true } @@ -628,14 +690,8 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade nExtReloc = 4 // need another two relocations for non-zero addend } - // Note: ld64 currently has a bug that any non-zero addend for BR26 relocation - // will make the linking fail because it thinks the code is not PIC even though - // the BR26 relocation should be fully resolved at link time. 
- // That is the reason why the next if block is disabled. When the bug in ld64 - // is fixed, we can enable this block and also enable duff's device in cmd/7g. - if false && target.IsDarwin() { + if target.IsWindows() { var o0, o1 uint32 - if target.IsBigEndian() { o0 = uint32(val >> 32) o1 = uint32(val) @@ -643,15 +699,20 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade o0 = uint32(val) o1 = uint32(val >> 32) } - // Mach-O wants the addend to be encoded in the instruction - // Note that although Mach-O supports ARM64_RELOC_ADDEND, it - // can only encode 24-bit of signed addend, but the instructions - // supports 33-bit of signed addend, so we always encode the - // addend in place. - o0 |= (uint32((xadd>>12)&3) << 29) | (uint32((xadd>>12>>2)&0x7ffff) << 5) - o1 |= uint32(xadd&0xfff) << 10 - // when laid out, the instruction order must always be o1, o2. + // The first instruction (ADRP) has a 21-bit immediate field, + // and the second (ADD) has a 12-bit immediate field. + // The first instruction is only for high bits, but to get the carry bits right we have + // to put the full addend, including the bottom 12 bits again. + // That limits the distance of any addend to only 21 bits. + // But we assume that LDRP's top bit will be interpreted as a sign bit, + // so we only use 20 bits. + // pereloc takes care of introducing new symbol labels + // every megabyte for longer relocations. + xadd := uint32(xadd) + o0 |= (xadd&3)<<29 | (xadd&0xffffc)<<3 + o1 |= (xadd & 0xfff) << 10 + if target.IsBigEndian() { val = int64(o0)<<32 | int64(o1) } else { @@ -668,6 +729,18 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade nExtReloc = 2 // need two ELF relocations. see elfreloc1 } return val, nExtReloc, isOk + + case objabi.R_ADDR: + if target.IsWindows() && r.Add() != 0 { + if r.Siz() == 8 { + val = r.Add() + } else if target.IsBigEndian() { + val = int64(uint32(val)) | int64(r.Add())<<32 + } else { + val = val>>32<<32 | int64(uint32(r.Add())) + } + return val, 1, true + } } } @@ -865,7 +938,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade return val, 0, false } -func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64) int64 { +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { log.Fatalf("unexpected relocation variant") return -1 } @@ -1018,37 +1091,54 @@ func addpltsym(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade } } -const machoRelocLimit = 1 << 23 +const ( + machoRelocLimit = 1 << 23 + peRelocLimit = 1 << 20 +) func gensymlate(ctxt *ld.Link, ldr *loader.Loader) { // When external linking on darwin, Mach-O relocation has only signed 24-bit // addend. For large symbols, we generate "label" symbols in the middle, so // that relocations can target them with smaller addends. - if !ctxt.IsDarwin() || !ctxt.IsExternal() { + // On Windows, we only get 21 bits, again (presumably) signed. + if !ctxt.IsDarwin() && !ctxt.IsWindows() || !ctxt.IsExternal() { return } - big := false - for _, seg := range ld.Segments { - if seg.Length >= machoRelocLimit { - big = true - break - } - } - if !big { - return // skip work if nothing big + limit := int64(machoRelocLimit) + if ctxt.IsWindows() { + limit = peRelocLimit } - // addLabelSyms adds "label" symbols at s+machoRelocLimit, s+2*machoRelocLimit, etc. 
- addLabelSyms := func(s loader.Sym, sz int64) { + if ctxt.IsDarwin() { + big := false + for _, seg := range ld.Segments { + if seg.Length >= machoRelocLimit { + big = true + break + } + } + if !big { + return // skip work if nothing big + } + } + + // addLabelSyms adds "label" symbols at s+limit, s+2*limit, etc. + addLabelSyms := func(s loader.Sym, limit, sz int64) { v := ldr.SymValue(s) - for off := int64(machoRelocLimit); off < sz; off += machoRelocLimit { - p := ldr.LookupOrCreateSym(machoLabelName(ldr, s, off), ldr.SymVersion(s)) + for off := limit; off < sz; off += limit { + p := ldr.LookupOrCreateSym(offsetLabelName(ldr, s, off), ldr.SymVersion(s)) ldr.SetAttrReachable(p, true) ldr.SetSymValue(p, v+off) ldr.SetSymSect(p, ldr.SymSect(s)) - ld.AddMachoSym(ldr, p) - //fmt.Printf("gensymlate %s %x\n", ldr.SymName(p), ldr.SymValue(p)) + if ctxt.IsDarwin() { + ld.AddMachoSym(ldr, p) + } else if ctxt.IsWindows() { + ld.AddPELabelSym(ldr, p) + } else { + panic("missing case in gensymlate") + } + // fmt.Printf("gensymlate %s %x\n", ldr.SymName(p), ldr.SymValue(p)) } } @@ -1057,26 +1147,150 @@ func gensymlate(ctxt *ld.Link, ldr *loader.Loader) { continue } if ldr.SymType(s) == sym.STEXT { - continue // we don't target the middle of a function + if ctxt.IsDarwin() || ctxt.IsWindows() { + // Cannot relocate into middle of function. + // Generate symbol names for every offset we need in duffcopy/duffzero (only 64 each). + switch ldr.SymName(s) { + case "runtime.duffcopy": + addLabelSyms(s, 8, 8*64) + case "runtime.duffzero": + addLabelSyms(s, 4, 4*64) + } + } + continue // we don't target the middle of other functions } sz := ldr.SymSize(s) - if sz <= machoRelocLimit { + if sz <= limit { continue } - addLabelSyms(s, sz) + addLabelSyms(s, limit, sz) } // Also for carrier symbols (for which SymSize is 0) for _, ss := range ld.CarrierSymByType { - if ss.Sym != 0 && ss.Size > machoRelocLimit { - addLabelSyms(ss.Sym, ss.Size) + if ss.Sym != 0 && ss.Size > limit { + addLabelSyms(ss.Sym, limit, ss.Size) } } } -// machoLabelName returns the name of the "label" symbol used for a -// relocation targeting s+off. The label symbols is used on darwin -// when external linking, so that the addend fits in a Mach-O relocation. -func machoLabelName(ldr *loader.Loader, s loader.Sym, off int64) string { - return fmt.Sprintf("%s.%d", ldr.SymExtname(s), off/machoRelocLimit) +// offsetLabelName returns the name of the "label" symbol used for a +// relocation targeting s+off. The label symbols are used on Darwin/Windows +// when external linking, so that the addend fits in a Mach-O/PE relocation. +func offsetLabelName(ldr *loader.Loader, s loader.Sym, off int64) string { + if off>>20<<20 == off { + return fmt.Sprintf("%s+%dMB", ldr.SymExtname(s), off>>20) + } + return fmt.Sprintf("%s+%d", ldr.SymExtname(s), off) +} + +// Convert the direct jump relocation r to refer to a trampoline if the target is too far +func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { + relocs := ldr.Relocs(s) + r := relocs.At(ri) + const pcrel = 1 + switch r.Type() { + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_CALL26), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_JUMP26), + objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_BRANCH26*2 + pcrel: + // Host object relocations that will be turned into a PLT call. + // The PLT may be too far. Insert a trampoline for them.
+ fallthrough + case objabi.R_CALLARM64: + var t int64 + // ldr.SymValue(rs) == 0 indicates a cross-package jump to a function that is not yet + // laid out. Conservatively use a trampoline. This should be rare, as we lay out packages + // in dependency order. + if ldr.SymValue(rs) != 0 { + t = ldr.SymValue(rs) + r.Add() - (ldr.SymValue(s) + int64(r.Off())) + } + if t >= 1<<27 || t < -1<<27 || ldr.SymValue(rs) == 0 || (*ld.FlagDebugTramp > 1 && (ldr.SymPkg(s) == "" || ldr.SymPkg(s) != ldr.SymPkg(rs))) { + // direct call too far, need to insert trampoline. + // look up existing trampolines first. if we found one within the range + // of direct call, we can reuse it. otherwise create a new one. + var tramp loader.Sym + for i := 0; ; i++ { + oName := ldr.SymName(rs) + name := oName + fmt.Sprintf("%+x-tramp%d", r.Add(), i) + tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) + ldr.SetAttrReachable(tramp, true) + if ldr.SymType(tramp) == sym.SDYNIMPORT { + // don't reuse trampoline defined in other module + continue + } + if oName == "runtime.deferreturn" { + ldr.SetIsDeferReturnTramp(tramp, true) + } + if ldr.SymValue(tramp) == 0 { + // either the trampoline does not exist -- we need to create one, + // or we found one whose address is not yet assigned -- it will be + // laid down immediately after the current function. use this one. + break + } + + t = ldr.SymValue(tramp) - (ldr.SymValue(s) + int64(r.Off())) + if t >= -1<<27 && t < 1<<27 { + // found an existing trampoline that is not too far + // we can just use it + break + } + } + if ldr.SymType(tramp) == 0 { + // trampoline does not exist, create one + trampb := ldr.MakeSymbolUpdater(tramp) + ctxt.AddTramp(trampb) + if ldr.SymType(rs) == sym.SDYNIMPORT { + if r.Add() != 0 { + ctxt.Errorf(s, "nonzero addend for DYNIMPORT call: %v+%d", ldr.SymName(rs), r.Add()) + } + gentrampgot(ctxt, ldr, trampb, rs) + } else { + gentramp(ctxt, ldr, trampb, rs, r.Add()) + } + } + // modify reloc to point to tramp, which will be resolved later + sb := ldr.MakeSymbolUpdater(s) + relocs := sb.Relocs() + r := relocs.At(ri) + r.SetSym(tramp) + r.SetAdd(0) // clear the offset embedded in the instruction + } + default: + ctxt.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type())) + } +} + +// generate a trampoline to target+offset. +func gentramp(ctxt *ld.Link, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { + tramp.SetSize(12) // 3 instructions + P := make([]byte, tramp.Size()) + o1 := uint32(0x90000010) // adrp x16, target + o2 := uint32(0x91000210) // add x16, pc-relative-offset + o3 := uint32(0xd61f0200) // br x16 + ctxt.Arch.ByteOrder.PutUint32(P, o1) + ctxt.Arch.ByteOrder.PutUint32(P[4:], o2) + ctxt.Arch.ByteOrder.PutUint32(P[8:], o3) + tramp.SetData(P) + + r, _ := tramp.AddRel(objabi.R_ADDRARM64) + r.SetSiz(8) + r.SetSym(target) + r.SetAdd(offset) +} + +// generate a trampoline to target+offset for a DYNIMPORT symbol via GOT.
+func gentrampgot(ctxt *ld.Link, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym) { + tramp.SetSize(12) // 3 instructions + P := make([]byte, tramp.Size()) + o1 := uint32(0x90000010) // adrp x16, target@GOT + o2 := uint32(0xf9400210) // ldr x16, [x16, offset] + o3 := uint32(0xd61f0200) // br x16 + ctxt.Arch.ByteOrder.PutUint32(P, o1) + ctxt.Arch.ByteOrder.PutUint32(P[4:], o2) + ctxt.Arch.ByteOrder.PutUint32(P[8:], o3) + tramp.SetData(P) + + r, _ := tramp.AddRel(objabi.R_ARM64_GOTPCREL) + r.SetSiz(8) + r.SetSym(target) } diff --git a/src/cmd/link/internal/arm64/obj.go b/src/cmd/link/internal/arm64/obj.go index 18a32531e90..9c7459855c1 100644 --- a/src/cmd/link/internal/arm64/obj.go +++ b/src/cmd/link/internal/arm64/obj.go @@ -45,6 +45,7 @@ func Init() (*sys.Arch, ld.Arch) { Minalign: minAlign, Dwarfregsp: dwarfRegSP, Dwarfreglr: dwarfRegLR, + TrampLimit: 0x7c00000, // 26-bit signed offset * 4, leave room for PLT etc. Adddynrel: adddynrel, Archinit: archinit, @@ -59,6 +60,7 @@ func Init() (*sys.Arch, ld.Arch) { Machoreloc1: machoreloc1, MachorelocSize: 8, PEreloc1: pereloc1, + Trampoline: trampoline, Androiddynld: "/system/bin/linker64", Linuxdynld: "/lib/ld-linux-aarch64.so.1", diff --git a/src/cmd/link/internal/ld/ar.go b/src/cmd/link/internal/ld/ar.go index e4fd5916766..22f53a4df2e 100644 --- a/src/cmd/link/internal/ld/ar.go +++ b/src/cmd/link/internal/ld/ar.go @@ -32,10 +32,10 @@ package ld import ( "cmd/internal/bio" - "cmd/internal/objabi" "cmd/link/internal/sym" "encoding/binary" "fmt" + "internal/buildcfg" "io" "os" ) @@ -170,7 +170,7 @@ func readArmap(filename string, f *bio.Reader, arhdr ArHdr) archiveMap { // For Mach-O and PE/386 files we strip a leading // underscore from the symbol name. - if objabi.GOOS == "darwin" || objabi.GOOS == "ios" || (objabi.GOOS == "windows" && objabi.GOARCH == "386") { + if buildcfg.GOOS == "darwin" || buildcfg.GOOS == "ios" || (buildcfg.GOOS == "windows" && buildcfg.GOARCH == "386") { if name[0] == '_' && len(name) > 1 { name = name[1:] } diff --git a/src/cmd/link/internal/ld/asmb.go b/src/cmd/link/internal/ld/asmb.go index fda04394550..37546695556 100644 --- a/src/cmd/link/internal/ld/asmb.go +++ b/src/cmd/link/internal/ld/asmb.go @@ -29,8 +29,6 @@ func asmb(ctxt *Link) { } var wg sync.WaitGroup - sect := Segtext.Sections[0] - offset := sect.Vaddr - Segtext.Vaddr + Segtext.Fileoff f := func(ctxt *Link, out *OutBuf, start, length int64) { pad := thearch.CodePad if pad == nil { @@ -39,23 +37,14 @@ func asmb(ctxt *Link) { CodeblkPad(ctxt, out, start, length, pad) } - if !thearch.WriteTextBlocks { - writeParallel(&wg, f, ctxt, offset, sect.Vaddr, sect.Length) - for _, sect := range Segtext.Sections[1:] { - offset := sect.Vaddr - Segtext.Vaddr + Segtext.Fileoff + for _, sect := range Segtext.Sections { + offset := sect.Vaddr - Segtext.Vaddr + Segtext.Fileoff + // Handle text sections with Codeblk + if sect.Name == ".text" { + writeParallel(&wg, f, ctxt, offset, sect.Vaddr, sect.Length) + } else { writeParallel(&wg, datblk, ctxt, offset, sect.Vaddr, sect.Length) } - } else { - // TODO why can't we handle all sections this way? 
- for _, sect := range Segtext.Sections { - offset := sect.Vaddr - Segtext.Vaddr + Segtext.Fileoff - // Handle additional text sections with Codeblk - if sect.Name == ".text" { - writeParallel(&wg, f, ctxt, offset, sect.Vaddr, sect.Length) - } else { - writeParallel(&wg, datblk, ctxt, offset, sect.Vaddr, sect.Length) - } - } } if Segrodata.Filelen > 0 { diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go index 481dc674750..ae0d7520ebc 100644 --- a/src/cmd/link/internal/ld/config.go +++ b/src/cmd/link/internal/ld/config.go @@ -5,10 +5,9 @@ package ld import ( - "cmd/internal/objabi" "cmd/internal/sys" "fmt" - "log" + "internal/buildcfg" ) // A BuildMode indicates the sort of object we are building. @@ -29,23 +28,23 @@ const ( func (mode *BuildMode) Set(s string) error { badmode := func() error { - return fmt.Errorf("buildmode %s not supported on %s/%s", s, objabi.GOOS, objabi.GOARCH) + return fmt.Errorf("buildmode %s not supported on %s/%s", s, buildcfg.GOOS, buildcfg.GOARCH) } switch s { default: return fmt.Errorf("invalid buildmode: %q", s) case "exe": - switch objabi.GOOS + "/" + objabi.GOARCH { + switch buildcfg.GOOS + "/" + buildcfg.GOARCH { case "darwin/arm64", "windows/arm", "windows/arm64": // On these platforms, everything is PIE *mode = BuildModePIE default: *mode = BuildModeExe } case "pie": - switch objabi.GOOS { + switch buildcfg.GOOS { case "aix", "android", "linux", "windows", "darwin", "ios": case "freebsd": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "amd64": default: return badmode() @@ -55,16 +54,16 @@ func (mode *BuildMode) Set(s string) error { } *mode = BuildModePIE case "c-archive": - switch objabi.GOOS { + switch buildcfg.GOOS { case "aix", "darwin", "ios", "linux": case "freebsd": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "amd64": default: return badmode() } case "windows": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "amd64", "386", "arm", "arm64": default: return badmode() @@ -74,16 +73,16 @@ func (mode *BuildMode) Set(s string) error { } *mode = BuildModeCArchive case "c-shared": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "386", "amd64", "arm", "arm64", "ppc64le", "s390x": default: return badmode() } *mode = BuildModeCShared case "shared": - switch objabi.GOOS { + switch buildcfg.GOOS { case "linux": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "386", "amd64", "arm", "arm64", "ppc64le", "s390x": default: return badmode() @@ -93,21 +92,21 @@ func (mode *BuildMode) Set(s string) error { } *mode = BuildModeShared case "plugin": - switch objabi.GOOS { + switch buildcfg.GOOS { case "linux": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "386", "amd64", "arm", "arm64", "s390x", "ppc64le": default: return badmode() } case "darwin": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "amd64", "arm64": default: return badmode() } case "freebsd": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "amd64": default: return badmode() @@ -181,13 +180,13 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { if ctxt.Debugvlog > 1 { defer func() { if res { - log.Printf("external linking is forced by: %s\n", reason) + ctxt.Logf("external linking is forced by: %s\n", reason) } }() } - if sys.MustLinkExternal(objabi.GOOS, objabi.GOARCH) { - return true, fmt.Sprintf("%s/%s requires external linking", objabi.GOOS, objabi.GOARCH) + if sys.MustLinkExternal(buildcfg.GOOS, buildcfg.GOARCH) { + return true, fmt.Sprintf("%s/%s requires external linking", 
buildcfg.GOOS, buildcfg.GOARCH) } if *flagMsan { @@ -198,17 +197,24 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { // https://golang.org/issue/14449 // https://golang.org/issue/21961 if iscgo && ctxt.Arch.InFamily(sys.MIPS64, sys.MIPS, sys.PPC64, sys.RISCV64) { - return true, objabi.GOARCH + " does not support internal cgo" + return true, buildcfg.GOARCH + " does not support internal cgo" } - if iscgo && objabi.GOOS == "android" { - return true, objabi.GOOS + " does not support internal cgo" + if iscgo && (buildcfg.GOOS == "android" || buildcfg.GOOS == "dragonfly") { + // It seems that on Dragonfly thread local storage is + // set up by the dynamic linker, so internal cgo linking + // doesn't work. Test case is "go test runtime/cgo". + return true, buildcfg.GOOS + " does not support internal cgo" + } + if iscgo && buildcfg.GOOS == "windows" && buildcfg.GOARCH == "arm64" { + // windows/arm64 internal linking is not implemented. + return true, buildcfg.GOOS + "/" + buildcfg.GOARCH + " does not support internal cgo" } // When the race flag is set, the LLVM tsan relocatable file is linked // into the final binary, which means external linking is required because // internal linking does not support it. if *flagRace && ctxt.Arch.InFamily(sys.PPC64) { - return true, "race on " + objabi.GOARCH + return true, "race on " + buildcfg.GOARCH } // Some build modes require work the internal linker cannot do (yet). @@ -218,7 +224,7 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { case BuildModeCShared: return true, "buildmode=c-shared" case BuildModePIE: - switch objabi.GOOS + "/" + objabi.GOARCH { + switch buildcfg.GOOS + "/" + buildcfg.GOARCH { case "linux/amd64", "linux/arm64", "android/arm64": case "windows/386", "windows/amd64", "windows/arm", "windows/arm64": case "darwin/amd64", "darwin/arm64": @@ -252,7 +258,7 @@ func determineLinkMode(ctxt *Link) { // default value of -linkmode. If it is not set when the // linker is called we take the value it was set to when // cmd/link was compiled. (See make.bash.) - switch objabi.Getgoextlinkenabled() { + switch buildcfg.Getgoextlinkenabled() { case "0": ctxt.LinkMode = LinkInternal via = "via GO_EXTLINK_ENABLED " @@ -275,8 +281,8 @@ func determineLinkMode(ctxt *Link) { } case LinkExternal: switch { - case objabi.GOARCH == "ppc64" && objabi.GOOS != "aix": - Exitf("external linking not supported for %s/ppc64", objabi.GOOS) + case buildcfg.GOARCH == "ppc64" && buildcfg.GOOS != "aix": + Exitf("external linking not supported for %s/ppc64", buildcfg.GOOS) } } } diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 92d38bb63e5..6659e95cc1f 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -39,6 +39,7 @@ import ( "cmd/link/internal/loader" "cmd/link/internal/sym" "compress/zlib" + "debug/elf" "encoding/binary" "fmt" "log" @@ -66,7 +67,7 @@ func isRuntimeDepPkg(pkg string) bool { // Estimate the max size needed to hold any new trampolines created for this function. This // is used to determine when the section can be split if it becomes too large, to ensure that // the trampolines are in the same section as the function that uses them. -func maxSizeTrampolinesPPC64(ldr *loader.Loader, s loader.Sym, isTramp bool) uint64 { +func maxSizeTrampolines(ctxt *Link, ldr *loader.Loader, s loader.Sym, isTramp bool) uint64 { // If thearch.Trampoline is nil, then trampoline support is not available on this arch. // A trampoline does not need any dependent trampolines. 
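+	// The estimate simply assumes one trampoline per direct call or jump in s.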
if thearch.Trampoline == nil || isTramp { @@ -81,8 +82,14 @@ func maxSizeTrampolinesPPC64(ldr *loader.Loader, s loader.Sym, isTramp bool) uin n++ } } - // Trampolines in ppc64 are 4 instructions. - return n * 16 + + if ctxt.IsPPC64() { + return n * 16 // Trampolines in PPC64 are 4 instructions. + } + if ctxt.IsARM64() { + return n * 12 // Trampolines in ARM64 are 3 instructions. + } + panic("unreachable") } // detect too-far jumps in function s, and add trampolines if necessary @@ -98,7 +105,8 @@ func trampoline(ctxt *Link, s loader.Sym) { relocs := ldr.Relocs(s) for ri := 0; ri < relocs.Count(); ri++ { r := relocs.At(ri) - if !r.Type().IsDirectCallOrJump() { + rt := r.Type() + if !rt.IsDirectCallOrJump() && !isPLTCall(rt) { continue } rs := r.Sym() @@ -107,8 +115,11 @@ func trampoline(ctxt *Link, s loader.Sym) { } rs = ldr.ResolveABIAlias(rs) if ldr.SymValue(rs) == 0 && (ldr.SymType(rs) != sym.SDYNIMPORT && ldr.SymType(rs) != sym.SUNDEFEXT) { - if ldr.SymPkg(rs) == ldr.SymPkg(s) { - continue // symbols in the same package are laid out together + if ldr.SymPkg(s) != "" && ldr.SymPkg(rs) == ldr.SymPkg(s) { + // Symbols in the same package are laid out together. + // Except that if SymPkg(s) == "", it is a host object symbol + // which may call an external symbol via PLT. + continue } if isRuntimeDepPkg(ldr.SymPkg(s)) && isRuntimeDepPkg(ldr.SymPkg(rs)) { continue // runtime packages are laid out together @@ -117,7 +128,27 @@ func trampoline(ctxt *Link, s loader.Sym) { thearch.Trampoline(ctxt, ldr, ri, rs, s) } +} +// whether rt is a (host object) relocation that will be turned into +// a call to PLT. +func isPLTCall(rt objabi.RelocType) bool { + const pcrel = 1 + switch rt { + // ARM64 + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_CALL26), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_JUMP26), + objabi.MachoRelocOffset + MACHO_ARM64_RELOC_BRANCH26*2 + pcrel: + return true + + // ARM + case objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_CALL), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_PC24), + objabi.ElfRelocOffset + objabi.RelocType(elf.R_ARM_JUMP24): + return true + } + // TODO: other architectures. + return false } // FoldSubSymbolOffset computes the offset of symbol s to its top-level outer @@ -165,6 +196,7 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { rs := r.Sym() rs = ldr.ResolveABIAlias(rs) rt := r.Type() + weak := r.Weak() if off < 0 || off+siz > int32(len(P)) { rname := "" if rs != 0 { @@ -211,7 +243,7 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { st.err.Errorf(s, "unhandled relocation for %s (type %d (%s) rtype %d (%s))", ldr.SymName(rs), rst, rst, rt, sym.RelocName(target.Arch, rt)) } } - if rs != 0 && rst != sym.STLSBSS && rt != objabi.R_WEAKADDROFF && rt != objabi.R_METHODOFF && !ldr.AttrReachable(rs) { + if rs != 0 && rst != sym.STLSBSS && !weak && rt != objabi.R_METHODOFF && !ldr.AttrReachable(rs) { st.err.Errorf(s, "unreachable sym in relocation: %s", ldr.SymName(rs)) } @@ -305,6 +337,10 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { log.Fatalf("cannot handle R_TLS_IE (sym %s) when linking internally", ldr.SymName(s)) } case objabi.R_ADDR: + if weak && !ldr.AttrReachable(rs) { + // Redirect it to runtime.unreachableMethod, which will throw if called. 
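+			// (deadcodePass.init, later in this patch, adds runtime.unreachableMethod
+			// as a root, so this retargeting always has a live symbol to point at.)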
+ rs = syms.unreachableMethod + } if target.IsExternal() { nExtReloc++ @@ -387,18 +423,18 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { break } o = ldr.SymValue(rs) + r.Add() - int64(ldr.SymSect(rs).Vaddr) - case objabi.R_WEAKADDROFF, objabi.R_METHODOFF: + case objabi.R_METHODOFF: if !ldr.AttrReachable(rs) { - if rt == objabi.R_METHODOFF { - // Set it to a sentinel value. The runtime knows this is not pointing to - // anything valid. - o = -1 - break - } - continue + // Set it to a sentinel value. The runtime knows this is not pointing to + // anything valid. + o = -1 + break } fallthrough case objabi.R_ADDROFF: + if weak && !ldr.AttrReachable(rs) { + continue + } // The method offset tables using this relocation expect the offset to be relative // to the start of the first text section, even if there are multiple. if ldr.SymSect(rs).Name == ".text" { @@ -506,7 +542,7 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { if target.IsPPC64() || target.IsS390X() { if rv != sym.RV_NONE { - o = thearch.Archrelocvariant(target, ldr, r, rv, s, o) + o = thearch.Archrelocvariant(target, ldr, r, rv, s, o, P) } } @@ -585,6 +621,9 @@ func extreloc(ctxt *Link, ldr *loader.Loader, s loader.Sym, r loader.Reloc) (loa case objabi.R_ADDR: // set up addend for eventual relocation via outer symbol. rs := ldr.ResolveABIAlias(r.Sym()) + if r.Weak() && !ldr.AttrReachable(rs) { + rs = ctxt.ArchSyms.unreachableMethod + } rs, off := FoldSubSymbolOffset(ldr, rs) rr.Xadd = r.Add() + off rr.Xsym = rs @@ -635,7 +674,7 @@ func extreloc(ctxt *Link, ldr *loader.Loader, s loader.Sym, r loader.Reloc) (loa return ExtrelocSimple(ldr, r), true // These reloc types don't need external relocations. - case objabi.R_ADDROFF, objabi.R_WEAKADDROFF, objabi.R_METHODOFF, objabi.R_ADDRCUOFF, + case objabi.R_ADDROFF, objabi.R_METHODOFF, objabi.R_ADDRCUOFF, objabi.R_SIZE, objabi.R_CONST, objabi.R_GOTOFF: return rr, false } @@ -710,9 +749,8 @@ func windynrelocsym(ctxt *Link, rel *loader.SymbolBuilder, s loader.Sym) { if targ == 0 { continue } - rt := r.Type() if !ctxt.loader.AttrReachable(targ) { - if rt == objabi.R_WEAKADDROFF { + if r.Weak() { continue } ctxt.Errorf(s, "dynamic relocation to unreachable symbol %s", @@ -786,6 +824,10 @@ func dynrelocsym(ctxt *Link, s loader.Sym) { if r.IsMarker() { continue // skip marker relocations } + rSym := r.Sym() + if r.Weak() && !ldr.AttrReachable(rSym) { + continue + } if ctxt.BuildMode == BuildModePIE && ctxt.LinkMode == LinkInternal { // It's expected that some relocations will be done // later by relocsym (R_TLS_LE, R_ADDROFF), so @@ -794,7 +836,6 @@ func dynrelocsym(ctxt *Link, s loader.Sym) { continue } - rSym := r.Sym() if rSym != 0 && ldr.SymType(rSym) == sym.SDYNIMPORT || r.Type() >= objabi.ElfRelocOffset { if rSym != 0 && !ldr.AttrReachable(rSym) { ctxt.Errorf(s, "dynamic relocation to unreachable symbol %s", ldr.SymName(rSym)) @@ -2193,23 +2234,87 @@ func (ctxt *Link) textaddress() { ctxt.Textp[0] = text } - va := uint64(Rnd(*FlagTextAddr, int64(Funcalign))) + start := uint64(Rnd(*FlagTextAddr, int64(Funcalign))) + va := start n := 1 sect.Vaddr = va - ntramps := 0 + + limit := thearch.TrampLimit + if limit == 0 { + limit = 1 << 63 // unlimited + } + if *FlagDebugTextSize != 0 { + limit = uint64(*FlagDebugTextSize) + } + if *FlagDebugTramp > 1 { + limit = 1 // debug mode, force generating trampolines for everything + } + + if ctxt.IsAIX() && ctxt.IsExternal() { + // On AIX, normally we won't generate direct calls to external symbols, + // except in one test, 
cmd/go/testdata/script/link_syso_issue33139.txt.
+		// That test doesn't make much sense, and it is not clear it ever works.
+		// Just generate trampolines for now (turning each direct call into an
+		// indirect call, which at least builds).
+		limit = 1
+	}
+
+	// First pass: assign addresses assuming the program is small and
+	// generate no trampolines.
+	big := false
 	for _, s := range ctxt.Textp {
-		sect, n, va = assignAddress(ctxt, sect, n, s, va, false)
+		sect, n, va = assignAddress(ctxt, sect, n, s, va, false, big)
+		if va-start >= limit {
+			big = true
+			break
+		}
+	}
 
-		trampoline(ctxt, s) // resolve jumps, may add trampolines if jump too far
-
-		// lay down trampolines after each function
-		for ; ntramps < len(ctxt.tramps); ntramps++ {
-			tramp := ctxt.tramps[ntramps]
-			if ctxt.IsAIX() && strings.HasPrefix(ldr.SymName(tramp), "runtime.text.") {
-				// Already set in assignAddress
+	// Second pass: only if the program is too big, insert trampolines for
+	// too-far jumps and for targets with unknown addresses.
+	if big {
+		// reset addresses
+		for _, s := range ctxt.Textp {
+			if ldr.OuterSym(s) != 0 || s == text {
 				continue
 			}
-			sect, n, va = assignAddress(ctxt, sect, n, tramp, va, true)
+			oldv := ldr.SymValue(s)
+			for sub := s; sub != 0; sub = ldr.SubSym(sub) {
+				ldr.SetSymValue(sub, ldr.SymValue(sub)-oldv)
+			}
+		}
+		va = start
+
+		ntramps := 0
+		for _, s := range ctxt.Textp {
+			sect, n, va = assignAddress(ctxt, sect, n, s, va, false, big)
+
+			trampoline(ctxt, s) // resolve jumps, may add trampolines if jump too far
+
+			// lay down trampolines after each function
+			for ; ntramps < len(ctxt.tramps); ntramps++ {
+				tramp := ctxt.tramps[ntramps]
+				if ctxt.IsAIX() && strings.HasPrefix(ldr.SymName(tramp), "runtime.text.") {
+					// Already set in assignAddress
+					continue
+				}
+				sect, n, va = assignAddress(ctxt, sect, n, tramp, va, true, big)
+			}
+		}
+
+		// merge tramps into Textp, keeping Textp in address order
+		if ntramps != 0 {
+			newtextp := make([]loader.Sym, 0, len(ctxt.Textp)+ntramps)
+			i := 0
+			for _, s := range ctxt.Textp {
+				for ; i < ntramps && ldr.SymValue(ctxt.tramps[i]) < ldr.SymValue(s); i++ {
+					newtextp = append(newtextp, ctxt.tramps[i])
+				}
+				newtextp = append(newtextp, s)
+			}
+			newtextp = append(newtextp, ctxt.tramps[i:ntramps]...)
+
+			ctxt.Textp = newtextp
+		}
+	}
@@ -2221,25 +2326,10 @@
 		ldr.SetSymValue(etext, int64(va))
 		ldr.SetSymValue(text, int64(Segtext.Sections[0].Vaddr))
 	}
-
-	// merge tramps into Textp, keeping Textp in address order
-	if ntramps != 0 {
-		newtextp := make([]loader.Sym, 0, len(ctxt.Textp)+ntramps)
-		i := 0
-		for _, s := range ctxt.Textp {
-			for ; i < ntramps && ldr.SymValue(ctxt.tramps[i]) < ldr.SymValue(s); i++ {
-				newtextp = append(newtextp, ctxt.tramps[i])
-			}
-			newtextp = append(newtextp, s)
-		}
-		newtextp = append(newtextp, ctxt.tramps[i:ntramps]...)
- - ctxt.Textp = newtextp - } } // assigns address for a text symbol, returns (possibly new) section, its number, and the address -func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64, isTramp bool) (*sym.Section, int, uint64) { +func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64, isTramp, big bool) (*sym.Section, int, uint64) { ldr := ctxt.loader if thearch.AssignAddress != nil { return thearch.AssignAddress(ldr, sect, n, s, va, isTramp) @@ -2264,36 +2354,46 @@ func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64 funcsize = uint64(ldr.SymSize(s)) } - // On ppc64x a text section should not be larger than 2^26 bytes due to the size of - // call target offset field in the bl instruction. Splitting into smaller text - // sections smaller than this limit allows the GNU linker to modify the long calls - // appropriately. The limit allows for the space needed for tables inserted by the linker. - - // If this function doesn't fit in the current text section, then create a new one. - + // If we need to split text sections, and this function doesn't fit in the current + // section, then create a new one. + // // Only break at outermost syms. + if big && splitTextSections(ctxt) && ldr.OuterSym(s) == 0 { + // For debugging purposes, allow text size limit to be cranked down, + // so as to stress test the code that handles multiple text sections. + var textSizelimit uint64 = thearch.TrampLimit + if *FlagDebugTextSize != 0 { + textSizelimit = uint64(*FlagDebugTextSize) + } - // For debugging purposes, allow text size limit to be cranked down, - // so as to stress test the code that handles multiple text sections. - var textSizelimit uint64 = 0x1c00000 - if *FlagDebugTextSize != 0 { - textSizelimit = uint64(*FlagDebugTextSize) - } - - if ctxt.Arch.InFamily(sys.PPC64) && ldr.OuterSym(s) == 0 && ctxt.IsExternal() { // Sanity check: make sure the limit is larger than any // individual text symbol. if funcsize > textSizelimit { - panic(fmt.Sprintf("error: ppc64 text size limit %d less than text symbol %s size of %d", textSizelimit, ldr.SymName(s), funcsize)) + panic(fmt.Sprintf("error: text size limit %d less than text symbol %s size of %d", textSizelimit, ldr.SymName(s), funcsize)) } - if va-sect.Vaddr+funcsize+maxSizeTrampolinesPPC64(ldr, s, isTramp) > textSizelimit { + if va-sect.Vaddr+funcsize+maxSizeTrampolines(ctxt, ldr, s, isTramp) > textSizelimit { + sectAlign := int32(thearch.Funcalign) + if ctxt.IsPPC64() { + // Align the next text section to the worst case function alignment likely + // to be encountered when processing function symbols. The start address + // is rounded against the final alignment of the text section later on in + // (*Link).address. This may happen due to usage of PCALIGN directives + // larger than Funcalign, or usage of ISA 3.1 prefixed instructions + // (see ISA 3.1 Book I 1.9). 
+				const ppc64maxFuncalign = 64
+				sectAlign = ppc64maxFuncalign
+				va = uint64(Rnd(int64(va), ppc64maxFuncalign))
+			}
+
 			// Set the length for the previous text section
 			sect.Length = va - sect.Vaddr
 
 			// Create new section, set the starting Vaddr
 			sect = addsection(ctxt.loader, ctxt.Arch, &Segtext, ".text", 05)
+
 			sect.Vaddr = va
+			sect.Align = sectAlign
 			ldr.SetSymSect(s, sect)
 
 			// Create a symbol for the start of the secondary text sections
@@ -2306,6 +2406,7 @@ func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64
 			ntext.SetType(sym.STEXT)
 			ntext.SetSize(int64(MINFUNC))
 			ntext.SetOnList(true)
+			ntext.SetAlign(sectAlign)
 			ctxt.tramps = append(ctxt.tramps, ntext.Sym())
 
 			ntext.SetValue(int64(va))
@@ -2324,6 +2425,9 @@ func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64
 	ldr.SetSymValue(s, 0)
 	for sub := s; sub != 0; sub = ldr.SubSym(sub) {
 		ldr.SetSymValue(sub, ldr.SymValue(sub)+int64(va))
+		if ctxt.Debugvlog > 2 {
+			fmt.Println("assign text address:", ldr.SymName(sub), ldr.SymValue(sub))
+		}
 	}
 
 	va += funcsize
@@ -2331,6 +2435,19 @@ func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64
 	return sect, n, va
 }
 
+// Return whether we may need to split text sections.
+//
+// On PPC64x, when external linking, a text section should not be larger than 2^25 bytes
+// due to the size of the call target offset field in the bl instruction. Splitting into
+// text sections smaller than this limit allows the system linker to modify the long
+// calls appropriately. The limit allows for the space needed for tables inserted by the
+// linker.
+//
+// The same applies to Darwin/ARM64, with a 2^27 byte threshold.
+func splitTextSections(ctxt *Link) bool {
+	return (ctxt.IsPPC64() || (ctxt.IsARM64() && ctxt.IsDarwin())) && ctxt.IsExternal()
+}
+
 // address assigns virtual addresses to all segments and sections and
 // returns all segments in file order.
 func (ctxt *Link) address() []*sym.Segment {
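A condensed sketch (not from the patch) of the splitting condition assignAddress applies: a function, plus the worst-case set of trampolines it may need (one per direct call, sized per architecture), must still fit under the section limit. The constants below are illustrative; the real values come from thearch.TrampLimit and the -debugtextsize flag.

package main

import "fmt"

const (
	limit     = 0x7c00000 // e.g. the arm64 TrampLimit: 26-bit branch reach minus slack
	trampSize = 12        // bytes per arm64 trampoline (3 instructions)
)

// needNewSection mirrors the check in assignAddress: the next function, plus
// one worst-case trampoline per direct call it makes, must fit in the room
// left in the current text section.
func needNewSection(va, sectVaddr, funcSize, nCalls uint64) bool {
	return va-sectVaddr+funcSize+nCalls*trampSize > limit
}

func main() {
	fmt.Println(needNewSection(0x7bfff00, 0, 0x200, 4)) // true: start a new section
	fmt.Println(needNewSection(0x100000, 0, 0x200, 4))  // false: still fits
}

diff --git a/src/cmd/link/internal/ld/data_test.go b/src/cmd/link/internal/ld/data_test.go
index 7c46307bd8c..f91493bc417 100644
--- a/src/cmd/link/internal/ld/data_test.go
+++ b/src/cmd/link/internal/ld/data_test.go
@@ -8,6 +8,7 @@ import (
 	"cmd/internal/objabi"
 	"cmd/internal/sys"
 	"cmd/link/internal/loader"
+	"internal/buildcfg"
 	"testing"
 )
 
@@ -63,14 +64,14 @@ func TestAddGotSym(t *testing.T) {
 	}
 
 	// Save the architecture as we're going to set it on each test run.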
- origArch := objabi.GOARCH + origArch := buildcfg.GOARCH defer func() { - objabi.GOARCH = origArch + buildcfg.GOARCH = origArch }() for i, test := range tests { iself := len(test.rel) != 0 - objabi.GOARCH = test.arch.Name + buildcfg.GOARCH = test.arch.Name ctxt := setUpContext(test.arch, iself, test.ht, test.bm, test.lm) foo := ctxt.loader.CreateSymForUpdate("foo", 0) ctxt.loader.CreateExtSym("bar", 0) diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index 1874103b932..1ed5598c99d 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -11,6 +11,7 @@ import ( "cmd/link/internal/loader" "cmd/link/internal/sym" "fmt" + "internal/buildcfg" "unicode" ) @@ -19,11 +20,12 @@ var _ = fmt.Print type deadcodePass struct { ctxt *Link ldr *loader.Loader - wq heap // work queue, using min-heap for beter locality + wq heap // work queue, using min-heap for better locality ifaceMethod map[methodsig]bool // methods declared in reached interfaces markableMethods []methodref // methods of reached types reflectSeen bool // whether we have seen a reflect method call + dynlink bool methodsigstmp []methodsig // scratch buffer for decoding method signatures } @@ -31,9 +33,10 @@ type deadcodePass struct { func (d *deadcodePass) init() { d.ldr.InitReachable() d.ifaceMethod = make(map[methodsig]bool) - if objabi.Fieldtrack_enabled != 0 { + if buildcfg.Experiment.FieldTrack { d.ldr.Reachparent = make([]loader.Sym, d.ldr.NSym()) } + d.dynlink = d.ctxt.DynlinkingGo() if d.ctxt.BuildMode == BuildModeShared { // Mark all symbols defined in this library as reachable when @@ -62,6 +65,9 @@ func (d *deadcodePass) init() { } } names = append(names, *flagEntrySymbol) + // runtime.unreachableMethod is a function that will throw if called. + // We redirect unreachable methods to it. + names = append(names, "runtime.unreachableMethod") if !d.ctxt.linkShared && d.ctxt.BuildMode != BuildModePlugin { // runtime.buildVersion and runtime.modinfo are referenced in .go.buildinfo section // (see function buildinfo in data.go). They should normally be reachable from the @@ -83,14 +89,6 @@ func (d *deadcodePass) init() { } } - dynexpMap := d.ctxt.cgo_export_dynamic - if d.ctxt.LinkMode == LinkExternal { - dynexpMap = d.ctxt.cgo_export_static - } - for exp := range dynexpMap { - names = append(names, exp) - } - if d.ctxt.Debugvlog > 1 { d.ctxt.Logf("deadcode start names: %v\n", names) } @@ -101,6 +99,14 @@ func (d *deadcodePass) init() { // Also mark any Go functions (internal ABI). d.mark(d.ldr.Lookup(name, sym.SymVerABIInternal), 0) } + + // All dynamic exports are roots. + for _, s := range d.ctxt.dynexp { + if d.ctxt.Debugvlog > 1 { + d.ctxt.Logf("deadcode start dynexp: %s<%d>\n", d.ldr.SymName(s), d.ldr.SymVersion(s)) + } + d.mark(s, 0) + } } func (d *deadcodePass) flood() { @@ -115,16 +121,22 @@ func (d *deadcodePass) flood() { var usedInIface bool if isgotype { + if d.dynlink { + // When dynamic linking, a type may be passed across DSO + // boundary and get converted to interface at the other side. 
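+				// Since conversions in other DSOs are invisible to this link, treat
+				// every reachable type as potentially converted to an interface.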
+ d.ldr.SetAttrUsedInIface(symIdx, true) + } usedInIface = d.ldr.AttrUsedInIface(symIdx) } methods = methods[:0] for i := 0; i < relocs.Count(); i++ { r := relocs.At(i) + if r.Weak() { + continue + } t := r.Type() switch t { - case objabi.R_WEAKADDROFF: - continue case objabi.R_METHODOFF: if i+2 >= relocs.Count() { panic("expect three consecutive R_METHODOFF relocs") @@ -247,7 +259,7 @@ func (d *deadcodePass) mark(symIdx, parent loader.Sym) { if symIdx != 0 && !d.ldr.AttrReachable(symIdx) { d.wq.push(symIdx) d.ldr.SetAttrReachable(symIdx, true) - if objabi.Fieldtrack_enabled != 0 && d.ldr.Reachparent[symIdx] == 0 { + if buildcfg.Experiment.FieldTrack && d.ldr.Reachparent[symIdx] == 0 { d.ldr.Reachparent[symIdx] = parent } if *flagDumpDep { diff --git a/src/cmd/link/internal/ld/deadcode_test.go b/src/cmd/link/internal/ld/deadcode_test.go index b756091613c..6e128432dcb 100644 --- a/src/cmd/link/internal/ld/deadcode_test.go +++ b/src/cmd/link/internal/ld/deadcode_test.go @@ -7,8 +7,6 @@ package ld import ( "bytes" "internal/testenv" - "io/ioutil" - "os" "os/exec" "path/filepath" "testing" @@ -18,11 +16,7 @@ func TestDeadcode(t *testing.T) { testenv.MustHaveGoBuild(t) t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestDeadcode") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() tests := []struct { src string @@ -46,10 +40,10 @@ func TestDeadcode(t *testing.T) { if err != nil { t.Fatalf("%v: %v:\n%s", cmd.Args, err, out) } - if test.pos != "" && !bytes.Contains(out, []byte(test.pos)) { + if test.pos != "" && !bytes.Contains(out, []byte(test.pos+"\n")) { t.Errorf("%s should be reachable. Output:\n%s", test.pos, out) } - if test.neg != "" && bytes.Contains(out, []byte(test.neg)) { + if test.neg != "" && bytes.Contains(out, []byte(test.neg+"\n")) { t.Errorf("%s should not be reachable. Output:\n%s", test.neg, out) } }) diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index fc179fc6e44..c41d97706ef 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -10,6 +10,7 @@ import ( "cmd/link/internal/loader" "cmd/link/internal/sym" "debug/elf" + "encoding/binary" "log" ) @@ -126,8 +127,8 @@ func decodetypeName(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs } data := ldr.Data(r) - namelen := int(uint16(data[1])<<8 | uint16(data[2])) - return string(data[3 : 3+namelen]) + nameLen, nameLenLen := binary.Uvarint(data[1:]) + return string(data[1+nameLenLen : 1+nameLenLen+int(nameLen)]) } func decodetypeFuncInType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs, i int) loader.Sym { diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index 561f6f14759..c53d2408cbe 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -22,6 +22,7 @@ import ( "cmd/link/internal/loader" "cmd/link/internal/sym" "fmt" + "internal/buildcfg" "log" "path" "runtime" @@ -458,12 +459,6 @@ func newmemberoffsetattr(die *dwarf.DWDie, offs int32) { newattr(die, dwarf.DW_AT_data_member_location, dwarf.DW_CLS_CONSTANT, int64(offs), nil) } -// GDB doesn't like FORM_addr for AT_location, so emit a -// location expression that evals to a const. 
-func (d *dwctxt) newabslocexprattr(die *dwarf.DWDie, addr int64, symIdx loader.Sym) { - newattr(die, dwarf.DW_AT_location, dwarf.DW_CLS_ADDRESS, addr, dwSym(symIdx)) -} - func (d *dwctxt) lookupOrDiag(n string) loader.Sym { symIdx := d.ldr.Lookup(n, 0) if symIdx == 0 { @@ -1020,25 +1015,6 @@ func (d *dwctxt) synthesizechantypes(ctxt *Link, die *dwarf.DWDie) { } } -func (d *dwctxt) dwarfDefineGlobal(ctxt *Link, symIdx loader.Sym, str string, v int64, gotype loader.Sym) { - // Find a suitable CU DIE to include the global. - // One would think it's as simple as just looking at the unit, but that might - // not have any reachable code. So, we go to the runtime's CU if our unit - // isn't otherwise reachable. - unit := d.ldr.SymUnit(symIdx) - if unit == nil { - unit = ctxt.runtimeCU - } - ver := d.ldr.SymVersion(symIdx) - dv := d.newdie(unit.DWInfo, dwarf.DW_ABRV_VARIABLE, str, int(ver)) - d.newabslocexprattr(dv, v, symIdx) - if d.ldr.SymVersion(symIdx) < sym.SymVerStatic { - newattr(dv, dwarf.DW_AT_external, dwarf.DW_CLS_FLAG, 1, 0) - } - dt := d.defgotype(gotype) - d.newrefattr(dv, dwarf.DW_AT_type, dt) -} - // createUnitLength creates the initial length field with value v and update // offset of unit_length if needed. func (d *dwctxt) createUnitLength(su *loader.SymbolBuilder, v uint64) { @@ -1552,7 +1528,7 @@ func appendSyms(syms []loader.Sym, src []sym.LoaderSym) []loader.Sym { func (d *dwctxt) writeUnitInfo(u *sym.CompilationUnit, abbrevsym loader.Sym, infoEpilog loader.Sym) []loader.Sym { syms := []loader.Sym{} - if len(u.Textp) == 0 && u.DWInfo.Child == nil { + if len(u.Textp) == 0 && u.DWInfo.Child == nil && len(u.VarDIEs) == 0 { return syms } @@ -1583,6 +1559,7 @@ func (d *dwctxt) writeUnitInfo(u *sym.CompilationUnit, abbrevsym loader.Sym, inf if u.Consts != 0 { cu = append(cu, loader.Sym(u.Consts)) } + cu = appendSyms(cu, u.VarDIEs) var cusize int64 for _, child := range cu { cusize += int64(len(d.ldr.Data(child))) @@ -1867,7 +1844,7 @@ func dwarfGenerateDebugInfo(ctxt *Link) { if producerExtra := d.ldr.Lookup(dwarf.CUInfoPrefix+"producer."+unit.Lib.Pkg, 0); producerExtra != 0 { peData = d.ldr.Data(producerExtra) } - producer := "Go cmd/compile " + objabi.Version + producer := "Go cmd/compile " + buildcfg.Version if len(peData) > 0 { // We put a semicolon before the flags to clearly // separate them from the version, which can be long @@ -1907,10 +1884,11 @@ func dwarfGenerateDebugInfo(ctxt *Link) { checkStrictDups = 1 } - // Create DIEs for global variables and the types they use. - // FIXME: ideally this should be done in the compiler, since - // for globals there isn't any abiguity about which package - // a global belongs to. + // Make a pass through all data symbols, looking for those + // corresponding to reachable, Go-generated, user-visible + // global variables. For each global of this sort, locate + // the corresponding compiler-generated DIE symbol and tack + // it onto the list associated with the unit. for idx := loader.Sym(1); idx < loader.Sym(d.ldr.NDef()); idx++ { if !d.ldr.AttrReachable(idx) || d.ldr.AttrNotInSymbolTable(idx) || @@ -1925,7 +1903,8 @@ func dwarfGenerateDebugInfo(ctxt *Link) { continue } // Skip things with no type - if d.ldr.SymGoType(idx) == 0 { + gt := d.ldr.SymGoType(idx) + if gt == 0 { continue } // Skip file local symbols (this includes static tmps, stack @@ -1939,10 +1918,20 @@ func dwarfGenerateDebugInfo(ctxt *Link) { continue } - // Create DIE for global. 
- sv := d.ldr.SymValue(idx) - gt := d.ldr.SymGoType(idx) - d.dwarfDefineGlobal(ctxt, idx, sn, sv, gt) + // Find compiler-generated DWARF info sym for global in question, + // and tack it onto the appropriate unit. Note that there are + // circumstances under which we can't find the compiler-generated + // symbol-- this typically happens as a result of compiler options + // (e.g. compile package X with "-dwarf=0"). + + // FIXME: use an aux sym or a relocation here instead of a + // name lookup. + varDIE := d.ldr.Lookup(dwarf.InfoPrefix+sn, 0) + if varDIE != 0 { + unit := d.ldr.SymUnit(idx) + d.defgotype(gt) + unit.VarDIEs = append(unit.VarDIEs, sym.LoaderSym(varDIE)) + } } d.synthesizestringtypes(ctxt, dwtypes.Child) @@ -2302,12 +2291,6 @@ func getDwsectCUSize(sname string, pkgname string) uint64 { return dwsectCUSize[sname+"."+pkgname] } -func saveDwsectCUSize(sname string, pkgname string, size uint64) { - dwsectCUSizeMu.Lock() - defer dwsectCUSizeMu.Unlock() - dwsectCUSize[sname+"."+pkgname] = size -} - func addDwsectCUSize(sname string, pkgname string, size uint64) { dwsectCUSizeMu.Lock() defer dwsectCUSizeMu.Unlock() diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go index a66506d3928..5cc4800e2a3 100644 --- a/src/cmd/link/internal/ld/dwarf_test.go +++ b/src/cmd/link/internal/ld/dwarf_test.go @@ -19,6 +19,7 @@ import ( "path/filepath" "reflect" "runtime" + "sort" "strconv" "strings" "testing" @@ -39,11 +40,7 @@ func TestRuntimeTypesPresent(t *testing.T) { t.Skip("skipping on plan9; no DWARF symbol table in executables") } - dir, err := ioutil.TempDir("", "TestRuntimeTypesPresent") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() f := gobuild(t, dir, `package main; func main() { }`, NoOpt) defer f.Close() @@ -171,11 +168,7 @@ func main() { "main.Baz": {"Foo": true, "name": false}, } - dir, err := ioutil.TempDir("", "TestEmbeddedStructMarker") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() f := gobuild(t, dir, prog, NoOpt) @@ -255,11 +248,8 @@ func main() { y[0] = nil } ` - dir, err := ioutil.TempDir("", "TestSizes") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() + f := gobuild(t, dir, prog, NoOpt) defer f.Close() d, err := f.DWARF() @@ -303,11 +293,7 @@ func main() { c <- "foo" } ` - dir, err := ioutil.TempDir("", "TestFieldOverlap") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() f := gobuild(t, dir, prog, NoOpt) defer f.Close() @@ -351,13 +337,10 @@ func varDeclCoordsAndSubrogramDeclFile(t *testing.T, testpoint string, expectFil prog := fmt.Sprintf("package main\n%s\nfunc main() {\n\nvar i int\ni = i\n}\n", directive) - dir, err := ioutil.TempDir("", testpoint) - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() f := gobuild(t, dir, prog, NoOpt) + defer f.Close() d, err := f.DWARF() if err != nil { @@ -628,8 +611,8 @@ func TestInlinedRoutineRecords(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") } - if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" || runtime.GOOS == "darwin" { - t.Skip("skipping on solaris, illumos, and darwin, pending resolution of issue #23168") + if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" { + 
t.Skip("skipping on solaris, illumos, pending resolution of issue #23168") } t.Parallel() @@ -653,11 +636,7 @@ func main() { G = x } ` - dir, err := ioutil.TempDir("", "TestInlinedRoutineRecords") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() // Note: this is a build with "-l=4", as opposed to "-l -N". The // test is intended to verify DWARF that is only generated when @@ -665,6 +644,7 @@ func main() { // main.main, however, hence we build with "-gcflags=-l=4" as opposed // to "-gcflags=all=-l=4". f := gobuild(t, dir, prog, OptInl4) + defer f.Close() d, err := f.DWARF() if err != nil { @@ -788,14 +768,11 @@ func main() { func abstractOriginSanity(t *testing.T, pkgDir string, flags string) { t.Parallel() - dir, err := ioutil.TempDir("", "TestAbstractOriginSanity") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() // Build with inlining, to exercise DWARF inlining support. f := gobuildTestdata(t, dir, filepath.Join(pkgDir, "main"), flags) + defer f.Close() d, err := f.DWARF() if err != nil { @@ -871,8 +848,8 @@ func TestAbstractOriginSanity(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") } - if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" || runtime.GOOS == "darwin" { - t.Skip("skipping on solaris, illumos, and darwin, pending resolution of issue #23168") + if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" { + t.Skip("skipping on solaris, illumos, pending resolution of issue #23168") } if wd, err := os.Getwd(); err == nil { @@ -889,11 +866,11 @@ func TestAbstractOriginSanityIssue25459(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") } - if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" || runtime.GOOS == "darwin" { - t.Skip("skipping on solaris, illumos, and darwin, pending resolution of issue #23168") + if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" { + t.Skip("skipping on solaris, illumos, pending resolution of issue #23168") } - if runtime.GOARCH != "amd64" && runtime.GOARCH != "x86" { - t.Skip("skipping on not-amd64 not-x86; location lists not supported") + if runtime.GOARCH != "amd64" && runtime.GOARCH != "386" { + t.Skip("skipping on not-amd64 not-386; location lists not supported") } if wd, err := os.Getwd(); err == nil { @@ -910,8 +887,8 @@ func TestAbstractOriginSanityIssue26237(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") } - if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" || runtime.GOOS == "darwin" { - t.Skip("skipping on solaris, illumos, and darwin, pending resolution of issue #23168") + if runtime.GOOS == "solaris" || runtime.GOOS == "illumos" { + t.Skip("skipping on solaris, illumos, pending resolution of issue #23168") } if wd, err := os.Getwd(); err == nil { gopathdir := filepath.Join(wd, "testdata", "issue26237") @@ -973,13 +950,11 @@ func main() { print(p) } ` - dir, err := ioutil.TempDir("", "TestRuntimeType") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() f := gobuild(t, dir, prog, flags) + defer f.Close() + out, err := exec.Command(f.path).CombinedOutput() if err != nil { t.Fatalf("could not run test program: %v", err) @@ -1043,11 +1018,7 @@ func TestIssue27614(t *testing.T) { t.Parallel() - dir, err := ioutil.TempDir("", 
"go-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() const prog = `package main @@ -1161,11 +1132,7 @@ func TestStaticTmp(t *testing.T) { t.Parallel() - dir, err := ioutil.TempDir("", "go-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() const prog = `package main @@ -1243,11 +1210,7 @@ func TestPackageNameAttr(t *testing.T) { t.Parallel() - dir, err := ioutil.TempDir("", "go-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() const prog = "package main\nfunc main() {\nprintln(\"hello world\")\n}\n" @@ -1307,14 +1270,10 @@ func TestMachoIssue32233(t *testing.T) { t.Skip("skipping; test only interesting on darwin") } - tmpdir, err := ioutil.TempDir("", "TestMachoIssue32233") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - wd, err2 := os.Getwd() - if err2 != nil { + wd, err := os.Getwd() + if err != nil { t.Fatalf("where am I? %v", err) } pdir := filepath.Join(wd, "testdata", "issue32233", "main") @@ -1328,11 +1287,7 @@ func TestWindowsIssue36495(t *testing.T) { t.Skip("skipping: test only on windows") } - dir, err := ioutil.TempDir("", "TestEmbeddedStructMarker") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() prog := ` package main @@ -1343,10 +1298,12 @@ func main() { fmt.Println("Hello World") }` f := gobuild(t, dir, prog, NoOpt) + defer f.Close() exe, err := pe.Open(f.path) if err != nil { t.Fatalf("error opening pe file: %v", err) } + defer exe.Close() dw, err := exe.DWARF() if err != nil { t.Fatalf("error parsing DWARF: %v", err) @@ -1397,17 +1354,14 @@ func TestIssue38192(t *testing.T) { // Build a test program that contains a translation unit whose // text (from am assembly source) contains only a single instruction. - tmpdir, err := ioutil.TempDir("", "TestIssue38192") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() wd, err := os.Getwd() if err != nil { t.Fatalf("where am I? %v", err) } pdir := filepath.Join(wd, "testdata", "issue38192") f := gobuildTestdata(t, tmpdir, pdir, DefaultOpt) + defer f.Close() // Open the resulting binary and examine the DWARF it contains. // Look for the function of interest ("main.singleInstruction") @@ -1520,17 +1474,15 @@ func TestIssue39757(t *testing.T) { // compiler/runtime in ways that aren't happening now, so this // might be something to check for if it does start failing. - tmpdir, err := ioutil.TempDir("", "TestIssue38192") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() + wd, err := os.Getwd() if err != nil { t.Fatalf("where am I? %v", err) } pdir := filepath.Join(wd, "testdata", "issue39757") f := gobuildTestdata(t, tmpdir, pdir, DefaultOpt) + defer f.Close() syms, err := f.Symbols() if err != nil { @@ -1619,3 +1571,183 @@ func TestIssue39757(t *testing.T) { } } } + +func TestIssue42484(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; no DWARF symbol table in executables") + } + + t.Parallel() + + tmpdir, err := ioutil.TempDir("", "TestIssue42484") + if err != nil { + t.Fatalf("could not create directory: %v", err) + } + defer os.RemoveAll(tmpdir) + wd, err := os.Getwd() + if err != nil { + t.Fatalf("where am I? 
%v", err) + } + pdir := filepath.Join(wd, "testdata", "issue42484") + f := gobuildTestdata(t, tmpdir, pdir, NoOpt) + + var lastAddr uint64 + var lastFile string + var lastLine int + + dw, err := f.DWARF() + if err != nil { + t.Fatalf("error parsing DWARF: %v", err) + } + rdr := dw.Reader() + for { + e, err := rdr.Next() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if e == nil { + break + } + if e.Tag != dwarf.TagCompileUnit { + continue + } + lnrdr, err := dw.LineReader(e) + if err != nil { + t.Fatalf("error creating DWARF line reader: %v", err) + } + if lnrdr != nil { + var lne dwarf.LineEntry + for { + err := lnrdr.Next(&lne) + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("error reading next DWARF line: %v", err) + } + if lne.EndSequence { + continue + } + if lne.Address == lastAddr && (lne.File.Name != lastFile || lne.Line != lastLine) { + t.Errorf("address %#x is assigned to both %s:%d and %s:%d", lastAddr, lastFile, lastLine, lne.File.Name, lne.Line) + } + lastAddr = lne.Address + lastFile = lne.File.Name + lastLine = lne.Line + } + } + rdr.SkipChildren() + } + f.Close() +} + +func TestOutputParamAbbrevAndAttr(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; no DWARF symbol table in executables") + } + t.Parallel() + + // This test verifies that the compiler is selecting the correct + // DWARF abbreviation for output parameters, and that the + // variable parameter attribute is correct for in-params and + // out-params. + + const prog = ` +package main + +//go:noinline +func ABC(p1, p2, p3 int, f1, f2, f3 float32, b1 [1024]int) (r1 int, r2 int, r3 [1024]int, r4 byte) { + b1[0] = 6 + r1, r2, r3, r4 = p3, p2, b1, 'a' + return +} + +func main() { + a := [1024]int{} + v1, v2, v3, v4 := ABC(1, 2, 3, 1.0, 2.0, 1.0, a) + println(v1, v2, v3[0], v4) +} +` + dir := t.TempDir() + f := gobuild(t, dir, prog, NoOpt) + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + ex := examiner{} + if err := ex.populate(rdr); err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + // Locate the main.ABC DIE + abcs := ex.Named("main.ABC") + if len(abcs) == 0 { + t.Fatalf("unable to locate DIE for main.ABC") + } + if len(abcs) != 1 { + t.Fatalf("more than one main.ABC DIE") + } + abcdie := abcs[0] + + // Vet the DIE + if abcdie.Tag != dwarf.TagSubprogram { + t.Fatalf("unexpected tag %v on main.ABC DIE", abcdie.Tag) + } + + // A setting of DW_AT_variable_parameter indicates that the + // param in question is an output parameter; we want to see this + // attribute set to TRUE for all Go return params. It would be + // OK to have it missing for input parameters, but for the moment + // we verify that the attr is present but set to false. + + // Values in this map: + // + // 0: + // -1: varparm attr not found + // 1: varparm found with value false + // 2: varparm found with value true + // + foundParams := make(map[string]int) + + // Walk ABCs's children looking for params. + abcIdx := ex.idxFromOffset(abcdie.Offset) + childDies := ex.Children(abcIdx) + for _, child := range childDies { + if child.Tag == dwarf.TagFormalParameter { + st := -1 + if vp, ok := child.Val(dwarf.AttrVarParam).(bool); ok { + if vp { + st = 2 + } else { + st = 1 + } + } + if name, ok := child.Val(dwarf.AttrName).(string); ok { + foundParams[name] = st + } + } + } + + // Digest the result. 
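+	// Flatten the name->status map into sorted "name:status" strings so the
+	// comparison below is deterministic.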
+ found := make([]string, 0, len(foundParams)) + for k, v := range foundParams { + found = append(found, fmt.Sprintf("%s:%d", k, v)) + } + sort.Strings(found) + + // Make sure we see all of the expected params, that they have + // the varparam attr, and the varparm is set for the returns. + expected := "[b1:1 f1:1 f2:1 f3:1 p1:1 p2:1 p3:1 r1:2 r2:2 r3:2 r4:2]" + if fmt.Sprintf("%+v", found) != expected { + t.Errorf("param check failed, wanted %s got %s\n", + expected, found) + } +} diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index 37b2dc640d4..87d88dd957d 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -14,6 +14,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" + "internal/buildcfg" "path/filepath" "sort" "strings" @@ -519,6 +520,90 @@ func elfwriteinterp(out *OutBuf) int { return int(sh.Size) } +// member of .gnu.attributes of MIPS for fpAbi +const ( + // No floating point is present in the module (default) + MIPS_FPABI_NONE = 0 + // FP code in the module uses the FP32 ABI for a 32-bit ABI + MIPS_FPABI_ANY = 1 + // FP code in the module only uses single precision ABI + MIPS_FPABI_SINGLE = 2 + // FP code in the module uses soft-float ABI + MIPS_FPABI_SOFT = 3 + // FP code in the module assumes an FPU with FR=1 and has 12 + // callee-saved doubles. Historic, no longer supported. + MIPS_FPABI_HIST = 4 + // FP code in the module uses the FPXX ABI + MIPS_FPABI_FPXX = 5 + // FP code in the module uses the FP64 ABI + MIPS_FPABI_FP64 = 6 + // FP code in the module uses the FP64A ABI + MIPS_FPABI_FP64A = 7 +) + +func elfMipsAbiFlags(sh *ElfShdr, startva uint64, resoff uint64) int { + n := 24 + sh.Addr = startva + resoff - uint64(n) + sh.Off = resoff - uint64(n) + sh.Size = uint64(n) + sh.Type = uint32(elf.SHT_MIPS_ABIFLAGS) + sh.Flags = uint64(elf.SHF_ALLOC) + + return n +} + +//typedef struct +//{ +// /* Version of flags structure. */ +// uint16_t version; +// /* The level of the ISA: 1-5, 32, 64. */ +// uint8_t isa_level; +// /* The revision of ISA: 0 for MIPS V and below, 1-n otherwise. */ +// uint8_t isa_rev; +// /* The size of general purpose registers. */ +// uint8_t gpr_size; +// /* The size of co-processor 1 registers. */ +// uint8_t cpr1_size; +// /* The size of co-processor 2 registers. */ +// uint8_t cpr2_size; +// /* The floating-point ABI. */ +// uint8_t fp_abi; +// /* Processor-specific extension. */ +// uint32_t isa_ext; +// /* Mask of ASEs used. */ +// uint32_t ases; +// /* Mask of general flags. */ +// uint32_t flags1; +// uint32_t flags2; +//} Elf_Internal_ABIFlags_v0; +func elfWriteMipsAbiFlags(ctxt *Link) int { + sh := elfshname(".MIPS.abiflags") + ctxt.Out.SeekSet(int64(sh.Off)) + ctxt.Out.Write16(0) // version + ctxt.Out.Write8(32) // isaLevel + ctxt.Out.Write8(1) // isaRev + ctxt.Out.Write8(1) // gprSize + ctxt.Out.Write8(1) // cpr1Size + ctxt.Out.Write8(0) // cpr2Size + if buildcfg.GOMIPS == "softfloat" { + ctxt.Out.Write8(MIPS_FPABI_SOFT) // fpAbi + } else { + // Go cannot make sure non odd-number-fpr is used (ie, in load a double from memory). + // So, we mark the object is MIPS I style paired float/double register scheme, + // aka MIPS_FPABI_ANY. If we mark the object as FPXX, the kernel may use FR=1 mode, + // then we meet some problem. + // Note: MIPS_FPABI_ANY is bad naming: in fact it is MIPS I style FPR usage. + // It is not for 'ANY'. + // TODO: switch to FPXX after be sure that no odd-number-fpr is used. 
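+		// (FPXX code must work under both FR=0 and FR=1, which is why
+		// odd-numbered FPR use has to be ruled out first.)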
+		ctxt.Out.Write8(MIPS_FPABI_ANY) // fpAbi
+	}
+	ctxt.Out.Write32(0) // isaExt
+	ctxt.Out.Write32(0) // ases
+	ctxt.Out.Write32(0) // flags1
+	ctxt.Out.Write32(0) // flags2
+	return int(sh.Size)
+}
+
 func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sz int) int {
 	n := 3*4 + uint64(sz) + resoff%4
 
@@ -1204,6 +1289,10 @@ func (ctxt *Link) doelf() {
 	shstrtab.Addstring(".noptrbss")
 	shstrtab.Addstring("__libfuzzer_extra_counters")
 	shstrtab.Addstring(".go.buildinfo")
+	if ctxt.IsMIPS() {
+		shstrtab.Addstring(".MIPS.abiflags")
+		shstrtab.Addstring(".gnu.attributes")
+	}
 
 	// generate .tbss section for dynamic internal linker or external
 	// linking, so that various binutils could correctly calculate
@@ -1254,6 +1343,10 @@ func (ctxt *Link) doelf() {
 		shstrtab.Addstring(elfRelType + ".data.rel.ro")
 	}
 	shstrtab.Addstring(elfRelType + ".go.buildinfo")
+	if ctxt.IsMIPS() {
+		shstrtab.Addstring(elfRelType + ".MIPS.abiflags")
+		shstrtab.Addstring(elfRelType + ".gnu.attributes")
+	}
 
 	// add a .note.GNU-stack section to mark the stack as non-executable
 	shstrtab.Addstring(".note.GNU-stack")
@@ -1445,6 +1538,35 @@ func (ctxt *Link) doelf() {
 	if ctxt.LinkMode == LinkExternal && *flagBuildid != "" {
 		addgonote(ctxt, ".note.go.buildid", ELF_NOTE_GOBUILDID_TAG, []byte(*flagBuildid))
 	}
+
+	//type mipsGnuAttributes struct {
+	//	version uint8   // 'A'
+	//	length  uint32  // 15 including itself
+	//	gnu     [4]byte // "gnu\0"
+	//	tag     uint8   // 1:file, 2: section, 3: symbol, 1 here
+	//	taglen  uint32  // tag length, including tag, 7 here
+	//	tagfp   uint8   // 4
+	//	fpAbi   uint8   // see .MIPS.abiflags
+	//}
+	if ctxt.IsMIPS() {
+		gnuattributes := ldr.CreateSymForUpdate(".gnu.attributes", 0)
+		gnuattributes.SetType(sym.SELFROSECT)
+		gnuattributes.SetReachable(true)
+		gnuattributes.AddUint8('A')               // version 'A'
+		gnuattributes.AddUint32(ctxt.Arch, 15)    // length 15 including itself
+		gnuattributes.AddBytes([]byte("gnu\x00")) // "gnu\0"
+		gnuattributes.AddUint8(1)                 // 1:file, 2: section, 3: symbol, 1 here
+		gnuattributes.AddUint32(ctxt.Arch, 7)     // tag length, including tag, 7 here
+		gnuattributes.AddUint8(4)                 // 4 for FP, 8 for MSA
+		if buildcfg.GOMIPS == "softfloat" {
+			gnuattributes.AddUint8(MIPS_FPABI_SOFT)
+		} else {
+			// Note: MIPS_FPABI_ANY is badly named: it actually means MIPS I style
+			// FPR usage, not "any" ABI.
+			// TODO: switch to FPXX once it is certain no odd-numbered FPR is used.
+			gnuattributes.AddUint8(MIPS_FPABI_ANY)
+		}
+	}
 }
 
 // Do not write DT_NULL. elfdynhash will finish it.
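To make the record layout in the mipsGnuAttributes comment concrete, here is a self-contained sketch (not part of the patch) that serializes the same vendor subsection: the length field counts everything after the leading version byte, and little-endian output is assumed.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var b bytes.Buffer
	b.WriteByte('A')                                  // version
	binary.Write(&b, binary.LittleEndian, uint32(15)) // subsection length, including this field
	b.WriteString("gnu\x00")                          // vendor name
	b.WriteByte(1)                                    // tag: 1 = applies to the whole file
	binary.Write(&b, binary.LittleEndian, uint32(7))  // tag length, including the tag byte
	b.WriteByte(4)                                    // 4 for FP (8 would be MSA)
	b.WriteByte(3)                                    // fpAbi: 3 = MIPS_FPABI_SOFT
	fmt.Println(b.Len()-1 == 15)                      // true: length excludes the version byte
	fmt.Printf("%x\n", b.Bytes())
}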
@@ -1622,14 +1744,14 @@ func asmbElf(ctxt *Link) { sh.Flags = uint64(elf.SHF_ALLOC) sh.Addralign = 1 - if interpreter == "" && objabi.GO_LDSO != "" { - interpreter = objabi.GO_LDSO + if interpreter == "" && buildcfg.GO_LDSO != "" { + interpreter = buildcfg.GO_LDSO } if interpreter == "" { switch ctxt.HeadType { case objabi.Hlinux: - if objabi.GOOS == "android" { + if buildcfg.GOOS == "android" { interpreter = thearch.Androiddynld if interpreter == "" { Exitf("ELF interpreter not set") @@ -1910,6 +2032,25 @@ elfobj: shsym(sh, ldr, ldr.Lookup(".shstrtab", 0)) eh.Shstrndx = uint16(sh.shnum) + if ctxt.IsMIPS() { + sh = elfshname(".MIPS.abiflags") + sh.Type = uint32(elf.SHT_MIPS_ABIFLAGS) + sh.Flags = uint64(elf.SHF_ALLOC) + sh.Addralign = 8 + resoff -= int64(elfMipsAbiFlags(sh, uint64(startva), uint64(resoff))) + + ph := newElfPhdr() + ph.Type = elf.PT_MIPS_ABIFLAGS + ph.Flags = elf.PF_R + phsh(ph, sh) + + sh = elfshname(".gnu.attributes") + sh.Type = uint32(elf.SHT_GNU_ATTRIBUTES) + sh.Addralign = 1 + ldr := ctxt.loader + shsym(sh, ldr, ldr.Lookup(".gnu.attributes", 0)) + } + // put these sections early in the list if !*FlagS { elfshname(".symtab") @@ -2029,6 +2170,10 @@ elfobj: if !*FlagD { a += int64(elfwriteinterp(ctxt.Out)) } + if ctxt.IsMIPS() { + a += int64(elfWriteMipsAbiFlags(ctxt)) + } + if ctxt.LinkMode != LinkExternal { if ctxt.HeadType == objabi.Hnetbsd { a += int64(elfwritenetbsdsig(ctxt.Out)) @@ -2050,6 +2195,12 @@ elfobj: if a > elfreserve { Errorf(nil, "ELFRESERVE too small: %d > %d with %d text sections", a, elfreserve, numtext) } + + // Verify the amount of space allocated for the elf header is sufficient. The file offsets are + // already computed in layout, so we could spill into another section. + if a > int64(HEADR) { + Errorf(nil, "HEADR too small: %d > %d with %d text sections", a, HEADR, numtext) + } } func elfadddynsym(ldr *loader.Loader, target *Target, syms *ArchSyms, s loader.Sym) { diff --git a/src/cmd/link/internal/ld/elf_test.go b/src/cmd/link/internal/ld/elf_test.go index 776fc1b4f97..d86ebb89e04 100644 --- a/src/cmd/link/internal/ld/elf_test.go +++ b/src/cmd/link/internal/ld/elf_test.go @@ -1,3 +1,4 @@ +//go:build cgo // +build cgo // Copyright 2019 The Go Authors. All rights reserved. @@ -21,11 +22,7 @@ import ( func TestDynSymShInfo(t *testing.T) { t.Parallel() testenv.MustHaveGoBuild(t) - dir, err := ioutil.TempDir("", "go-build-issue33358") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() const prog = ` package main @@ -51,6 +48,7 @@ func main() { if err != nil { t.Fatalf("failed to open built file: %v", err) } + defer fi.Close() elfFile, err := elf.NewFile(fi) if err != nil { @@ -95,11 +93,7 @@ func TestNoDuplicateNeededEntries(t *testing.T) { t.Parallel() - dir, err := ioutil.TempDir("", "no-dup-needed") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() wd, err := os.Getwd() if err != nil { diff --git a/src/cmd/link/internal/ld/execarchive.go b/src/cmd/link/internal/ld/execarchive.go index 4687c624de4..918b86cdc5b 100644 --- a/src/cmd/link/internal/ld/execarchive.go +++ b/src/cmd/link/internal/ld/execarchive.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !wasm && !windows // +build !wasm,!windows package ld diff --git a/src/cmd/link/internal/ld/execarchive_noexec.go b/src/cmd/link/internal/ld/execarchive_noexec.go index a70dea9fda3..5e1f2669d3c 100644 --- a/src/cmd/link/internal/ld/execarchive_noexec.go +++ b/src/cmd/link/internal/ld/execarchive_noexec.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build wasm || windows // +build wasm windows package ld diff --git a/src/cmd/link/internal/ld/fallocate_test.go b/src/cmd/link/internal/ld/fallocate_test.go index 244b70f0615..1ed0eb2ca74 100644 --- a/src/cmd/link/internal/ld/fallocate_test.go +++ b/src/cmd/link/internal/ld/fallocate_test.go @@ -2,12 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || linux // +build darwin linux package ld import ( - "io/ioutil" "os" "path/filepath" "syscall" @@ -15,14 +15,10 @@ import ( ) func TestFallocate(t *testing.T) { - dir, err := ioutil.TempDir("", "TestFallocate") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() filename := filepath.Join(dir, "a.out") out := NewOutBuf(nil) - err = out.Open(filename) + err := out.Open(filename) if err != nil { t.Fatalf("Open file failed: %v", err) } diff --git a/src/cmd/link/internal/ld/go.go b/src/cmd/link/internal/ld/go.go index fbc7a78d0ed..fc63b30c80f 100644 --- a/src/cmd/link/internal/ld/go.go +++ b/src/cmd/link/internal/ld/go.go @@ -9,6 +9,7 @@ package ld import ( "bytes" "cmd/internal/bio" + "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/loader" @@ -101,36 +102,13 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { return } - // Find cgo_export symbols. They are roots in the deadcode pass. - for _, f := range directives { - switch f[0] { - case "cgo_export_static", "cgo_export_dynamic": - if len(f) < 2 || len(f) > 3 { - continue - } - local := f[1] - switch ctxt.BuildMode { - case BuildModeCShared, BuildModeCArchive, BuildModePlugin: - if local == "main" { - continue - } - } - local = expandpkg(local, pkg) - if f[0] == "cgo_export_static" { - ctxt.cgo_export_static[local] = true - } else { - ctxt.cgo_export_dynamic[local] = true - } - } - } - // Record the directives. We'll process them later after Symbols are created. ctxt.cgodata = append(ctxt.cgodata, cgodata{file, pkg, directives}) } // Set symbol attributes or flags based on cgo directives. // Any newly discovered HOSTOBJ syms are added to 'hostObjSyms'. 
-func setCgoAttr(ctxt *Link, lookup func(string, int) loader.Sym, file string, pkg string, directives [][]string, hostObjSyms map[loader.Sym]struct{}) { +func setCgoAttr(ctxt *Link, file string, pkg string, directives [][]string, hostObjSyms map[loader.Sym]struct{}) { l := ctxt.loader for _, f := range directives { switch f[0] { @@ -173,7 +151,7 @@ func setCgoAttr(ctxt *Link, lookup func(string, int) loader.Sym, file string, pk if i := strings.Index(remote, "#"); i >= 0 { remote, q = remote[:i], remote[i+1:] } - s := lookup(local, 0) + s := l.LookupOrCreateSym(local, 0) st := l.SymType(s) if st == 0 || st == sym.SXREF || st == sym.SBSS || st == sym.SNOPTRBSS || st == sym.SHOSTOBJ { l.SetSymDynimplib(s, lib) @@ -199,7 +177,7 @@ func setCgoAttr(ctxt *Link, lookup func(string, int) loader.Sym, file string, pk } local := f[1] - s := lookup(local, 0) + s := l.LookupOrCreateSym(local, 0) su := l.MakeSymbolUpdater(s) su.SetType(sym.SHOSTOBJ) su.SetSize(0) @@ -207,7 +185,7 @@ func setCgoAttr(ctxt *Link, lookup func(string, int) loader.Sym, file string, pk continue case "cgo_export_static", "cgo_export_dynamic": - if len(f) < 2 || len(f) > 3 { + if len(f) < 2 || len(f) > 4 { break } local := f[1] @@ -216,13 +194,20 @@ func setCgoAttr(ctxt *Link, lookup func(string, int) loader.Sym, file string, pk remote = f[2] } local = expandpkg(local, pkg) + // The compiler adds a fourth argument giving + // the definition ABI of function symbols. + abi := obj.ABI0 + if len(f) > 3 { + var ok bool + abi, ok = obj.ParseABI(f[3]) + if !ok { + fmt.Fprintf(os.Stderr, "%s: bad ABI in cgo_export directive %s\n", os.Args[0], f) + nerrors++ + return + } + } - // The compiler arranges for an ABI0 wrapper - // to be available for all cgo-exported - // functions. Link.loadlib will resolve any - // ABI aliases we find here (since we may not - // yet know it's an alias). - s := lookup(local, 0) + s := l.LookupOrCreateSym(local, sym.ABIToVersion(abi)) if l.SymType(s) == sym.SHOSTOBJ { hostObjSyms[s] = struct{}{} @@ -230,7 +215,7 @@ func setCgoAttr(ctxt *Link, lookup func(string, int) loader.Sym, file string, pk switch ctxt.BuildMode { case BuildModeCShared, BuildModeCArchive, BuildModePlugin: - if s == lookup("main", 0) { + if s == l.Lookup("main", 0) { continue } } @@ -254,11 +239,32 @@ func setCgoAttr(ctxt *Link, lookup func(string, int) loader.Sym, file string, pk return } + // Mark exported symbols and also add them to + // the lists used for roots in the deadcode pass. if f[0] == "cgo_export_static" { + if ctxt.LinkMode == LinkExternal && !l.AttrCgoExportStatic(s) { + // Static cgo exports appear + // in the exported symbol table. + ctxt.dynexp = append(ctxt.dynexp, s) + } + if ctxt.LinkMode == LinkInternal { + // For internal linking, we're + // responsible for resolving + // relocations from host objects. + // Record the right Go symbol + // version to use. + l.AddCgoExport(s) + } l.SetAttrCgoExportStatic(s, true) } else { + if ctxt.LinkMode == LinkInternal && !l.AttrCgoExportDynamic(s) { + // Dynamic cgo exports appear + // in the exported symbol table. + ctxt.dynexp = append(ctxt.dynexp, s) + } l.SetAttrCgoExportDynamic(s, true) } + continue case "cgo_dynamic_linker": @@ -440,9 +446,16 @@ func (ctxt *Link) addexport() { return } - for _, exp := range ctxt.dynexp { - Adddynsym(ctxt.loader, &ctxt.Target, &ctxt.ArchSyms, exp) + // Add dynamic symbols. + for _, s := range ctxt.dynexp { + // Consistency check. 
+ if !ctxt.loader.AttrReachable(s) { + panic("dynexp entry not reachable") + } + + Adddynsym(ctxt.loader, &ctxt.Target, &ctxt.ArchSyms, s) } + for _, lib := range dedupLibraries(ctxt, dynlib) { adddynlib(ctxt, lib) } diff --git a/src/cmd/link/internal/ld/go_test.go b/src/cmd/link/internal/ld/go_test.go index 0197196023b..230f85a0e5f 100644 --- a/src/cmd/link/internal/ld/go_test.go +++ b/src/cmd/link/internal/ld/go_test.go @@ -8,7 +8,6 @@ import ( "cmd/internal/objabi" "internal/testenv" "io/ioutil" - "os" "os/exec" "path/filepath" "reflect" @@ -86,11 +85,7 @@ func TestDedupLibrariesOpenBSDLink(t *testing.T) { testenv.MustHaveCGO(t) t.Parallel() - dir, err := ioutil.TempDir("", "dedup-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() // cgo_import_dynamic both the unversioned libraries and pull in the // net package to get a cgo package with a versioned library. diff --git a/src/cmd/link/internal/ld/issue33808_test.go b/src/cmd/link/internal/ld/issue33808_test.go index 92a47faa4a0..43f4540a022 100644 --- a/src/cmd/link/internal/ld/issue33808_test.go +++ b/src/cmd/link/internal/ld/issue33808_test.go @@ -6,8 +6,6 @@ package ld import ( "internal/testenv" - "io/ioutil" - "os" "runtime" "strings" "testing" @@ -31,11 +29,7 @@ func TestIssue33808(t *testing.T) { testenv.MustHaveCGO(t) t.Parallel() - dir, err := ioutil.TempDir("", "TestIssue33808") - if err != nil { - t.Fatalf("could not create directory: %v", err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() f := gobuild(t, dir, prog, "-ldflags=-linkmode=external") f.Close() diff --git a/src/cmd/link/internal/ld/ld_test.go b/src/cmd/link/internal/ld/ld_test.go index cdfaadb17de..ca764632c3d 100644 --- a/src/cmd/link/internal/ld/ld_test.go +++ b/src/cmd/link/internal/ld/ld_test.go @@ -9,7 +9,6 @@ import ( "fmt" "internal/testenv" "io/ioutil" - "os" "os/exec" "path/filepath" "runtime" @@ -25,11 +24,6 @@ func TestUndefinedRelocErrors(t *testing.T) { testenv.MustInternalLink(t) t.Parallel() - dir, err := ioutil.TempDir("", "go-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) out, err := exec.Command(testenv.GoToolPath(t), "build", "./testdata/issue10978").CombinedOutput() if err == nil { @@ -108,11 +102,7 @@ func TestArchiveBuildInvokeWithExec(t *testing.T) { case "openbsd", "windows": t.Skip("c-archive unsupported") } - dir, err := ioutil.TempDir("", "go-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() srcfile := filepath.Join(dir, "test.go") arfile := filepath.Join(dir, "test.a") @@ -141,34 +131,36 @@ func TestArchiveBuildInvokeWithExec(t *testing.T) { } } -func TestPPC64LargeTextSectionSplitting(t *testing.T) { - // The behavior we're checking for is of interest only on ppc64. 
- if !strings.HasPrefix(runtime.GOARCH, "ppc64") { - t.Skip("test useful only for ppc64") +func TestLargeTextSectionSplitting(t *testing.T) { + switch runtime.GOARCH { + case "ppc64", "ppc64le": + case "arm64": + if runtime.GOOS == "darwin" { + break + } + fallthrough + default: + t.Skipf("text section splitting is not done in %s/%s", runtime.GOOS, runtime.GOARCH) } testenv.MustHaveGoBuild(t) testenv.MustHaveCGO(t) t.Parallel() - dir, err := ioutil.TempDir("", "go-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() - // NB: the use of -ldflags=-debugppc64textsize=1048576 tells the linker to + // NB: the use of -ldflags=-debugtextsize=1048576 tells the linker to // split text sections at a size threshold of 1M instead of the - // architected limit of 67M. The choice of building cmd/go is - // arbitrary; we just need something sufficiently large that uses + // architected limit of 67M or larger. The choice of building cmd/go + // is arbitrary; we just need something sufficiently large that uses // external linking. exe := filepath.Join(dir, "go.exe") - out, eerr := exec.Command(testenv.GoToolPath(t), "build", "-o", exe, "-ldflags=-linkmode=external -debugppc64textsize=1048576", "cmd/go").CombinedOutput() + out, eerr := exec.Command(testenv.GoToolPath(t), "build", "-o", exe, "-ldflags=-linkmode=external -debugtextsize=1048576", "cmd/go").CombinedOutput() if eerr != nil { t.Fatalf("build failure: %s\n%s\n", eerr, string(out)) } // Result should be runnable. - _, err = exec.Command(exe, "version").CombinedOutput() + _, err := exec.Command(exe, "version").CombinedOutput() if err != nil { t.Fatal(err) } @@ -194,11 +186,7 @@ func testWindowsBuildmodeCSharedASLR(t *testing.T, useASLR bool) { t.Parallel() testenv.MustHaveGoBuild(t) - dir, err := ioutil.TempDir("", "go-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() srcfile := filepath.Join(dir, "test.go") objfile := filepath.Join(dir, "test.dll") @@ -242,3 +230,103 @@ func testWindowsBuildmodeCSharedASLR(t *testing.T, useASLR bool) { t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag should not be set") } } + +// TestMemProfileCheck tests that cmd/link sets +// runtime.disableMemoryProfiling if the runtime.MemProfile +// symbol is unreachable after deadcode (and not dynlinking). +// The runtime then uses that to set the default value of +// runtime.MemProfileRate, which this test checks. 
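For context on what TestMemProfileCheck (next hunk) exercises: the linker writes a 1 byte into runtime.disableMemoryProfiling, and the runtime consults it when initializing runtime.MemProfileRate. Roughly, the runtime side looks like this (a sketch of the corresponding runtime change, not part of this patch):

	// In package runtime (sketch):

	// disableMemoryProfiling is set by the linker when runtime.MemProfile
	// is unreachable after deadcode elimination and the binary is not
	// dynamically linked.
	var disableMemoryProfiling bool

	// MemProfileRate defaults to 512 KiB, or 0 when profiling was linked out.
	var MemProfileRate int = defaultMemProfileRate(512 * 1024)

	func defaultMemProfileRate(v int) int {
		if disableMemoryProfiling {
			return 0 // the "no_memprofile" case below prints 0
		}
		return v // 524288, the value the other cases below expect
	}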
+func TestMemProfileCheck(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + tests := []struct { + name string + prog string + wantOut string + }{ + { + "no_memprofile", + ` +package main +import "runtime" +func main() { + println(runtime.MemProfileRate) +} +`, + "0", + }, + { + "with_memprofile", + ` +package main +import "runtime" +func main() { + runtime.MemProfile(nil, false) + println(runtime.MemProfileRate) +} +`, + "524288", + }, + { + "with_memprofile_indirect", + ` +package main +import "runtime" +var f = runtime.MemProfile +func main() { + if f == nil { + panic("no f") + } + println(runtime.MemProfileRate) +} +`, + "524288", + }, + { + "with_memprofile_runtime_pprof", + ` +package main +import "runtime" +import "runtime/pprof" +func main() { + _ = pprof.Profiles() + println(runtime.MemProfileRate) +} +`, + "524288", + }, + { + "with_memprofile_http_pprof", + ` +package main +import "runtime" +import _ "net/http/pprof" +func main() { + println(runtime.MemProfileRate) +} +`, + "524288", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + tempDir := t.TempDir() + src := filepath.Join(tempDir, "x.go") + if err := ioutil.WriteFile(src, []byte(tt.prog), 0644); err != nil { + t.Fatal(err) + } + cmd := exec.Command(testenv.GoToolPath(t), "run", src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + got := strings.TrimSpace(string(out)) + if got != tt.wantOut { + t.Errorf("got %q; want %q", got, tt.wantOut) + } + }) + } +} diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 314896824a0..e8f001ba8ec 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -49,6 +49,7 @@ import ( "encoding/base64" "encoding/binary" "fmt" + "internal/buildcfg" exec "internal/execabs" "io" "io/ioutil" @@ -56,7 +57,6 @@ import ( "os" "path/filepath" "runtime" - "sort" "strings" "sync" ) @@ -118,6 +118,8 @@ type ArchSyms struct { Dynamic loader.Sym DynSym loader.Sym DynStr loader.Sym + + unreachableMethod loader.Sym } // mkArchSym is a helper for setArchSyms, to set up a special symbol. @@ -142,6 +144,7 @@ func (ctxt *Link) setArchSyms() { ctxt.mkArchSym(".dynamic", 0, &ctxt.Dynamic) ctxt.mkArchSym(".dynsym", 0, &ctxt.DynSym) ctxt.mkArchSym(".dynstr", 0, &ctxt.DynStr) + ctxt.mkArchSym("runtime.unreachableMethod", sym.SymVerABIInternal, &ctxt.unreachableMethod) if ctxt.IsPPC64() { ctxt.mkArchSym("TOC", 0, &ctxt.TOC) @@ -174,11 +177,18 @@ func (ctxt *Link) setArchSyms() { } type Arch struct { - Funcalign int - Maxalign int - Minalign int - Dwarfregsp int - Dwarfreglr int + Funcalign int + Maxalign int + Minalign int + Dwarfregsp int + Dwarfreglr int + + // Threshold of total text size, used for trampoline insertion. If the total + // text size is smaller than TrampLimit, we won't need to insert trampolines. + // It is pretty close to the offset range of a direct CALL machine instruction. + // We leave some room for extra stuff like PLT stubs. + TrampLimit uint64 + Androiddynld string Linuxdynld string Freebsddynld string @@ -193,9 +203,6 @@ type Arch struct { // are padded with zeros. CodePad []byte - // Set to true to write all text blocks in with CodeBlkWrite - WriteTextBlocks bool - // Plan 9 variables. Plan9Magic uint32 Plan9_64Bit bool @@ -223,7 +230,7 @@ type Arch struct { // to-be-relocated data item (from sym.P). Return is an updated // offset value. 
Archrelocvariant func(target *Target, ldr *loader.Loader, rel loader.Reloc, - rv sym.RelocVariant, sym loader.Sym, offset int64) (relocatedOffset int64) + rv sym.RelocVariant, sym loader.Sym, offset int64, data []byte) (relocatedOffset int64) // Generate a trampoline for a call from s to rs if necessary. ri is // index of the relocation. @@ -377,7 +384,7 @@ func libinit(ctxt *Link) { suffix = "msan" } - Lflag(ctxt, filepath.Join(objabi.GOROOT, "pkg", fmt.Sprintf("%s_%s%s%s", objabi.GOOS, objabi.GOARCH, suffixsep, suffix))) + Lflag(ctxt, filepath.Join(buildcfg.GOROOT, "pkg", fmt.Sprintf("%s_%s%s%s", buildcfg.GOOS, buildcfg.GOARCH, suffixsep, suffix))) mayberemoveoutfile() @@ -388,9 +395,9 @@ func libinit(ctxt *Link) { if *flagEntrySymbol == "" { switch ctxt.BuildMode { case BuildModeCShared, BuildModeCArchive: - *flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s_lib", objabi.GOARCH, objabi.GOOS) + *flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s_lib", buildcfg.GOARCH, buildcfg.GOOS) case BuildModeExe, BuildModePIE: - *flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s", objabi.GOARCH, objabi.GOOS) + *flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s", buildcfg.GOARCH, buildcfg.GOOS) case BuildModeShared, BuildModePlugin: // No *flagEntrySymbol for -buildmode=shared and plugin default: @@ -493,10 +500,8 @@ func (ctxt *Link) loadlib() { default: log.Fatalf("invalid -strictdups flag value %d", *FlagStrictDups) } - if !*flagAbiWrap || ctxt.linkShared { + if !buildcfg.Experiment.RegabiWrappers { // Use ABI aliases if ABI wrappers are not used. - // TODO: for now we still use ABI aliases in shared linkage, even if - // the wrapper is enabled. flags |= loader.FlagUseABIAlias } elfsetstring1 := func(str string, off int) { elfsetstring(ctxt, 0, str, off) } @@ -505,9 +510,6 @@ func (ctxt *Link) loadlib() { return ctxt.loader.SymName(s) } - ctxt.cgo_export_static = make(map[string]bool) - ctxt.cgo_export_dynamic = make(map[string]bool) - // ctxt.Library grows during the loop, so not a range loop. i := 0 for ; i < len(ctxt.Library); i++ { @@ -539,12 +541,15 @@ func (ctxt *Link) loadlib() { // up symbol by name may not get expected result. iscgo = ctxt.LibraryByPkg["runtime/cgo"] != nil - ctxt.canUsePlugins = ctxt.LibraryByPkg["plugin"] != nil + + // Plugins require cgo support to function. Similarly, plugins may require additional + // internal linker support on some platforms which may not be implemented. + ctxt.canUsePlugins = ctxt.LibraryByPkg["plugin"] != nil && iscgo // We now have enough information to determine the link mode. determineLinkMode(ctxt) - if ctxt.LinkMode == LinkExternal && !iscgo && !(objabi.GOOS == "darwin" && ctxt.BuildMode != BuildModePlugin && ctxt.Arch.Family == sys.AMD64) { + if ctxt.LinkMode == LinkExternal && !iscgo && !(buildcfg.GOOS == "darwin" && ctxt.BuildMode != BuildModePlugin && ctxt.Arch.Family == sys.AMD64) { // This indicates a user requested -linkmode=external. // The startup code uses an import of runtime/cgo to decide // whether to initialize the TLS. So give it one. This could @@ -553,7 +558,12 @@ func (ctxt *Link) loadlib() { if ctxt.BuildMode == BuildModeShared || ctxt.linkShared { Exitf("cannot implicitly include runtime/cgo in a shared library") } - loadobjfile(ctxt, lib) + for ; i < len(ctxt.Library); i++ { + lib := ctxt.Library[i] + if lib.Shlib == "" { + loadobjfile(ctxt, lib) + } + } } } @@ -595,9 +605,6 @@ func (ctxt *Link) loadlib() { // errors - see if we can find libcompiler_rt.a instead.
*flagLibGCC = ctxt.findLibPathCmd("--print-file-name=libcompiler_rt.a", "libcompiler_rt") } - if *flagLibGCC != "none" { - hostArchive(ctxt, *flagLibGCC) - } if ctxt.HeadType == objabi.Hwindows { if p := ctxt.findLibPath("libmingwex.a"); p != "none" { hostArchive(ctxt, p) @@ -619,6 +626,9 @@ func (ctxt *Link) loadlib() { libmsvcrt.a libm.a */ } + if *flagLibGCC != "none" { + hostArchive(ctxt, *flagLibGCC) + } } } @@ -630,50 +640,13 @@ func (ctxt *Link) loadlib() { strictDupMsgCount = ctxt.loader.NStrictDupMsgs() } -// setupdynexp constructs ctxt.dynexp, a list of loader.Sym. -func setupdynexp(ctxt *Link) { - dynexpMap := ctxt.cgo_export_dynamic - if ctxt.LinkMode == LinkExternal { - dynexpMap = ctxt.cgo_export_static - } - d := make([]loader.Sym, 0, len(dynexpMap)) - for exp := range dynexpMap { - s := ctxt.loader.LookupOrCreateSym(exp, 0) - d = append(d, s) - // sanity check - if !ctxt.loader.AttrReachable(s) { - panic("dynexp entry not reachable") - } - } - sort.Slice(d, func(i, j int) bool { - return ctxt.loader.SymName(d[i]) < ctxt.loader.SymName(d[j]) - }) - - // Resolve ABI aliases in the list of cgo-exported functions. - // This is necessary because we load the ABI0 symbol for all - // cgo exports. - for i, s := range d { - if ctxt.loader.SymType(s) != sym.SABIALIAS { - continue - } - t := ctxt.loader.ResolveABIAlias(s) - ctxt.loader.CopyAttributes(s, t) - ctxt.loader.SetSymExtname(t, ctxt.loader.SymExtname(s)) - d[i] = t - } - ctxt.dynexp = d - - ctxt.cgo_export_static = nil - ctxt.cgo_export_dynamic = nil -} - // loadcgodirectives reads the previously discovered cgo directives, creating // symbols in preparation for host object loading or use later in the link. func (ctxt *Link) loadcgodirectives() { l := ctxt.loader hostObjSyms := make(map[loader.Sym]struct{}) for _, d := range ctxt.cgodata { - setCgoAttr(ctxt, ctxt.loader.LookupOrCreateSym, d.file, d.pkg, d.directives, hostObjSyms) + setCgoAttr(ctxt, d.file, d.pkg, d.directives, hostObjSyms) } ctxt.cgodata = nil @@ -738,7 +711,7 @@ func (ctxt *Link) linksetup() { } } - if ctxt.LinkMode == LinkExternal && ctxt.Arch.Family == sys.PPC64 && objabi.GOOS != "aix" { + if ctxt.LinkMode == LinkExternal && ctxt.Arch.Family == sys.PPC64 && buildcfg.GOOS != "aix" { toc := ctxt.loader.LookupOrCreateSym(".TOC.", 0) sb := ctxt.loader.MakeSymbolUpdater(toc) sb.SetType(sym.SDYNIMPORT) @@ -747,7 +720,7 @@ func (ctxt *Link) linksetup() { // The Android Q linker started to complain about underalignment of our TLS // section. We don't actually use the section on android, so don't // generate it. - if objabi.GOOS != "android" { + if buildcfg.GOOS != "android" { tlsg := ctxt.loader.LookupOrCreateSym("runtime.tlsg", 0) sb := ctxt.loader.MakeSymbolUpdater(tlsg) @@ -788,7 +761,19 @@ func (ctxt *Link) linksetup() { sb := ctxt.loader.MakeSymbolUpdater(goarm) sb.SetType(sym.SDATA) sb.SetSize(0) - sb.AddUint8(uint8(objabi.GOARM)) + sb.AddUint8(uint8(buildcfg.GOARM)) + } + + // Set runtime.disableMemoryProfiling bool if + // runtime.MemProfile is not retained in the binary after + // deadcode (and we're not dynamically linking).
+ memProfile := ctxt.loader.Lookup("runtime.MemProfile", sym.SymVerABIInternal) + if memProfile != 0 && !ctxt.loader.AttrReachable(memProfile) && !ctxt.DynlinkingGo() { + memProfSym := ctxt.loader.LookupOrCreateSym("runtime.disableMemoryProfiling", 0) + sb := ctxt.loader.MakeSymbolUpdater(memProfSym) + sb.SetType(sym.SDATA) + sb.SetSize(0) + sb.AddUint8(1) // true bool } } else { // If OTOH the module does not contain the runtime package, @@ -1269,7 +1254,7 @@ func (ctxt *Link) hostlink() { // -headerpad is incompatible with -fembed-bitcode. argv = append(argv, "-Wl,-headerpad,1144") } - if ctxt.DynlinkingGo() && objabi.GOOS != "ios" { + if ctxt.DynlinkingGo() && buildcfg.GOOS != "ios" { // -flat_namespace is deprecated on iOS. // It is useful for supporting plugins. We don't support plugins on iOS. argv = append(argv, "-Wl,-flat_namespace") @@ -1343,8 +1328,6 @@ func (ctxt *Link) hostlink() { if ctxt.HeadType == objabi.Hdarwin { argv = append(argv, "-dynamiclib") } else { - // ELF. - argv = append(argv, "-Wl,-Bsymbolic") if ctxt.UseRelro() { argv = append(argv, "-Wl,-z,relro") } @@ -1357,6 +1340,8 @@ func (ctxt *Link) hostlink() { // Pass -z nodelete to mark the shared library as // non-closeable: a dlclose will do nothing. argv = append(argv, "-Wl,-z,nodelete") + // Only pass Bsymbolic on non-Windows. + argv = append(argv, "-Wl,-Bsymbolic") } } case BuildModeShared: @@ -1387,12 +1372,12 @@ func (ctxt *Link) hostlink() { // from the beginning of the section (like sym.STYPE). argv = append(argv, "-Wl,-znocopyreloc") - if objabi.GOOS == "android" { + if buildcfg.GOOS == "android" { // Use lld to avoid errors from default linker (issue #38838) altLinker = "lld" } - if ctxt.Arch.InFamily(sys.ARM, sys.ARM64) && objabi.GOOS == "linux" { + if ctxt.Arch.InFamily(sys.ARM, sys.ARM64) && buildcfg.GOOS == "linux" { // On ARM, the GNU linker will generate COPY relocations // even with -znocopyreloc set. // https://sourceware.org/bugzilla/show_bug.cgi?id=19962 @@ -1414,7 +1399,7 @@ func (ctxt *Link) hostlink() { } } } - if ctxt.Arch.Family == sys.ARM64 && objabi.GOOS == "freebsd" { + if ctxt.Arch.Family == sys.ARM64 && buildcfg.GOOS == "freebsd" { // Switch to ld.bfd on freebsd/arm64. altLinker = "bfd" @@ -1441,7 +1426,7 @@ func (ctxt *Link) hostlink() { // only want to do this when producing a Windows output file // on a Windows host. outopt := *flagOutfile - if objabi.GOOS == "windows" && runtime.GOOS == "windows" && filepath.Ext(outopt) == "" { + if buildcfg.GOOS == "windows" && runtime.GOOS == "windows" && filepath.Ext(outopt) == "" { outopt += "." } argv = append(argv, "-o") @@ -1451,6 +1436,14 @@ func (ctxt *Link) hostlink() { argv = append(argv, fmt.Sprintf("-Wl,-rpath,%s", rpath.val)) } + if *flagInterpreter != "" { + // Many linkers support both -I and the --dynamic-linker flags + // to set the ELF interpreter, but lld only supports + // --dynamic-linker so prefer that (ld on very old Solaris only + // supports -I but that seems less important). + argv = append(argv, fmt.Sprintf("-Wl,--dynamic-linker,%s", *flagInterpreter)) + } + // Force global symbols to be exported for dlopen, etc. 
if ctxt.IsELF { argv = append(argv, "-rdynamic") @@ -1460,8 +1453,9 @@ func (ctxt *Link) hostlink() { argv = append(argv, "-Wl,-bE:"+fileName) } - if strings.Contains(argv[0], "clang") { - argv = append(argv, "-Qunused-arguments") + const unusedArguments = "-Qunused-arguments" + if linkerFlagSupported(ctxt.Arch, argv[0], altLinker, unusedArguments) { + argv = append(argv, unusedArguments) } const compressDWARF = "-Wl,--compress-debug-sections=zlib-gnu" @@ -1528,12 +1522,13 @@ func (ctxt *Link) hostlink() { // even when linking with -static, causing a linker // error when using GNU ld. So take out -rdynamic if // we added it. We do it in this order, rather than - // only adding -rdynamic later, so that -*extldflags + // only adding -rdynamic later, so that -extldflags // can override -rdynamic without using -static. + // Similarly for -Wl,--dynamic-linker. checkStatic := func(arg string) { if ctxt.IsELF && arg == "-static" { for i := range argv { - if argv[i] == "-rdynamic" { + if argv[i] == "-rdynamic" || strings.HasPrefix(argv[i], "-Wl,--dynamic-linker,") { argv[i] = "-static" } } @@ -1757,7 +1752,7 @@ func hostlinkArchArgs(arch *sys.Arch) []string { case sys.I386: return []string{"-m32"} case sys.AMD64: - if objabi.GOOS == "darwin" { + if buildcfg.GOOS == "darwin" { return []string{"-arch", "x86_64", "-m64"} } return []string{"-m64"} @@ -1766,7 +1761,7 @@ func hostlinkArchArgs(arch *sys.Arch) []string { case sys.ARM: return []string{"-marm"} case sys.ARM64: - if objabi.GOOS == "darwin" { + if buildcfg.GOOS == "darwin" { return []string{"-arch", "arm64"} } case sys.MIPS64: @@ -1774,7 +1769,7 @@ func hostlinkArchArgs(arch *sys.Arch) []string { case sys.MIPS: return []string{"-mabi=32"} case sys.PPC64: - if objabi.GOOS == "aix" { + if buildcfg.GOOS == "aix" { return []string{"-maix64"} } else { return []string{"-m64"} @@ -1784,6 +1779,8 @@ func hostlinkArchArgs(arch *sys.Arch) []string { return nil } +var wantHdr = objabi.HeaderString() + // ldobj loads an input object. If it is a host object (an object // compiled by a non-Go compiler) it returns the Hostobj pointer. If // it is a Go object, it returns nil. @@ -1827,7 +1824,11 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, return ldhostobj(ldmacho, ctxt.HeadType, f, pkg, length, pn, file) } - if /* x86 */ c1 == 0x4c && c2 == 0x01 || /* x86_64 */ c1 == 0x64 && c2 == 0x86 || /* armv7 */ c1 == 0xc4 && c2 == 0x01 { + switch c1<<8 | c2 { + case 0x4c01, // 386 + 0x6486, // amd64 + 0xc401, // arm + 0x64aa: // arm64 ldpe := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { textp, rsrc, err := loadpe.Load(ctxt.loader, ctxt.Arch, ctxt.IncVersion(), f, pkg, length, pn) if err != nil { @@ -1873,29 +1874,13 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, return nil } - Errorf(nil, "%s: not an object file", pn) + Errorf(nil, "%s: not an object file: @%d %02x%02x%02x%02x", pn, start, c1, c2, c3, c4) return nil } // First, check that the basic GOOS, GOARCH, and Version match. - t := fmt.Sprintf("%s %s %s ", objabi.GOOS, objabi.GOARCH, objabi.Version) - - line = strings.TrimRight(line, "\n") - if !strings.HasPrefix(line[10:]+" ", t) && !*flagF { - Errorf(nil, "%s: object is [%s] expected [%s]", pn, line[10:], t) - return nil - } - - // Second, check that longer lines match each other exactly, - // so that the Go compiler and write additional information - // that must be the same from run to run. 
- if len(line) >= len(t)+10 { - if theline == "" { - theline = line[10:] - } else if theline != line[10:] { - Errorf(nil, "%s: object is [%s] expected [%s]", pn, line[10:], theline) - return nil - } + if line != wantHdr { + Errorf(nil, "%s: linked object header mismatch:\nhave %q\nwant %q\n", pn, line, wantHdr) } // Skip over exports and other info -- ends with \n!\n. @@ -2098,25 +2083,6 @@ func ldshlibsyms(ctxt *Link, shlib string) { return } - // collect text symbol ABI versions. - symabi := make(map[string]int) // map (unmangled) symbol name to version - if *flagAbiWrap { - for _, elfsym := range syms { - if elf.ST_TYPE(elfsym.Info) != elf.STT_FUNC { - continue - } - // Demangle the name. Keep in sync with symtab.go:putelfsym. - if strings.HasSuffix(elfsym.Name, ".abiinternal") { - // ABIInternal symbol has mangled name, so the primary symbol is ABI0. - symabi[strings.TrimSuffix(elfsym.Name, ".abiinternal")] = 0 - } - if strings.HasSuffix(elfsym.Name, ".abi0") { - // ABI0 symbol has mangled name, so the primary symbol is ABIInternal. - symabi[strings.TrimSuffix(elfsym.Name, ".abi0")] = sym.SymVerABIInternal - } - } - } - for _, elfsym := range syms { if elf.ST_TYPE(elfsym.Info) == elf.STT_NOTYPE || elf.ST_TYPE(elfsym.Info) == elf.STT_SECTION { continue @@ -2128,15 +2094,14 @@ func ldshlibsyms(ctxt *Link, shlib string) { symname := elfsym.Name // (unmangled) symbol name if elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC && strings.HasPrefix(elfsym.Name, "type.") { ver = sym.SymVerABIInternal - } else if *flagAbiWrap && elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC { + } else if buildcfg.Experiment.RegabiWrappers && elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC { + // Demangle the ABI name. Keep in sync with symtab.go:mangleABIName. if strings.HasSuffix(elfsym.Name, ".abiinternal") { ver = sym.SymVerABIInternal symname = strings.TrimSuffix(elfsym.Name, ".abiinternal") } else if strings.HasSuffix(elfsym.Name, ".abi0") { ver = 0 symname = strings.TrimSuffix(elfsym.Name, ".abi0") - } else if abi, ok := symabi[elfsym.Name]; ok { - ver = abi } } @@ -2170,19 +2135,9 @@ func ldshlibsyms(ctxt *Link, shlib string) { l.SetSymExtname(s, elfsym.Name) } - // For function symbols, we don't know what ABI is - // available, so alias it under both ABIs. - // - // TODO(austin): This is almost certainly wrong once - // the ABIs are actually different. We might have to - // mangle Go function names in the .so to include the - // ABI. - if elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC && ver == 0 { - if *flagAbiWrap { - if _, ok := symabi[symname]; ok { - continue // only use alias for functions w/o ABI wrappers - } - } + // For function symbols, if ABI wrappers are not used, we don't + // know what ABI is available, so alias it under both ABIs. + if !buildcfg.Experiment.RegabiWrappers && elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC && ver == 0 { alias := ctxt.loader.LookupOrCreateSym(symname, sym.SymVerABIInternal) if l.SymType(alias) != 0 { continue @@ -2250,7 +2205,7 @@ func (ctxt *Link) dostkcheck() { // of non-splitting functions. 
var ch chain ch.limit = objabi.StackLimit - callsize(ctxt) - if objabi.GOARCH == "arm64" { + if buildcfg.GOARCH == "arm64" { // need extra 8 bytes below SP to save FP ch.limit -= 8 } @@ -2400,7 +2355,7 @@ func (sc *stkChk) print(ch *chain, limit int) { ctxt := sc.ctxt var name string if ch.sym != 0 { - name = ldr.SymName(ch.sym) + name = fmt.Sprintf("%s<%d>", ldr.SymName(ch.sym), ldr.SymVersion(ch.sym)) if ldr.IsNoSplit(ch.sym) { name += " (nosplit)" } diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go index f26d051a491..13618beff97 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -84,9 +84,6 @@ type Link struct { loader *loader.Loader cgodata []cgodata // cgo directives to load, three strings are args for loadcgo - cgo_export_static map[string]bool - cgo_export_dynamic map[string]bool - datap []loader.Sym dynexp []loader.Sym diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 3630e67c25d..642113cf070 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -7,6 +7,7 @@ package ld import ( "bytes" "cmd/internal/codesign" + "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/loader" @@ -14,6 +15,7 @@ import ( "debug/macho" "encoding/binary" "fmt" + "internal/buildcfg" "io" "os" "sort" @@ -86,6 +88,8 @@ const ( MACHO_SUBCPU_ARMV7 = 9 MACHO_CPU_ARM64 = 1<<24 | 12 MACHO_SUBCPU_ARM64_ALL = 0 + MACHO_SUBCPU_ARM64_V8 = 1 + MACHO_SUBCPU_ARM64E = 2 MACHO32SYMSIZE = 12 MACHO64SYMSIZE = 16 MACHO_X86_64_RELOC_UNSIGNED = 0 @@ -176,6 +180,8 @@ const ( LC_VERSION_MIN_WATCHOS = 0x30 LC_VERSION_NOTE = 0x31 LC_BUILD_VERSION = 0x32 + LC_DYLD_EXPORTS_TRIE = 0x80000033 + LC_DYLD_CHAINED_FIXUPS = 0x80000034 ) const ( @@ -466,27 +472,25 @@ func (ctxt *Link) domacho() { } } if machoPlatform == 0 { - switch ctxt.Arch.Family { - default: - machoPlatform = PLATFORM_MACOS - if ctxt.LinkMode == LinkInternal { - // For lldb, must say LC_VERSION_MIN_MACOSX or else - // it won't know that this Mach-O binary is from OS X - // (could be iOS or WatchOS instead). - // Go on iOS uses linkmode=external, and linkmode=external - // adds this itself. So we only need this code for linkmode=internal - // and we can assume OS X. - // - // See golang.org/issues/12941. - // - // The version must be at least 10.9; see golang.org/issues/30488. - ml := newMachoLoad(ctxt.Arch, LC_VERSION_MIN_MACOSX, 2) - ml.data[0] = 10<<16 | 9<<8 | 0<<0 // OS X version 10.9.0 - ml.data[1] = 10<<16 | 9<<8 | 0<<0 // SDK 10.9.0 - } - case sys.ARM, sys.ARM64: + machoPlatform = PLATFORM_MACOS + if buildcfg.GOOS == "ios" { machoPlatform = PLATFORM_IOS } + if ctxt.LinkMode == LinkInternal && machoPlatform == PLATFORM_MACOS { + var version uint32 + switch ctxt.Arch.Family { + case sys.AMD64: + // The version must be at least 10.9; see golang.org/issues/30488. + version = 10<<16 | 9<<8 | 0<<0 // 10.9.0 + case sys.ARM64: + version = 11<<16 | 0<<8 | 0<<0 // 11.0.0 + } + ml := newMachoLoad(ctxt.Arch, LC_BUILD_VERSION, 4) + ml.data[0] = uint32(machoPlatform) + ml.data[1] = version // OS version + ml.data[2] = version // SDK version + ml.data[3] = 0 // ntools + } } // empirically, string table must begin with " \x00". @@ -535,12 +539,31 @@ func (ctxt *Link) domacho() { sb.AddUint8(0) } - // Do not export C symbols dynamically in plugins, as runtime C symbols like crosscall2 - // are in pclntab and end up pointing at the host binary, breaking unwinding. - // See Issue #18190. 
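A note on the LC_BUILD_VERSION hunk above: Mach-O packs the OS and SDK versions written into ml.data as major<<16 | minor<<8 | patch. A tiny helper showing that encoding (the helper name is invented; the linker writes the shifts inline):

	// machoPackVersion returns the Mach-O version encoding used by
	// LC_BUILD_VERSION: 10.9.0 becomes 10<<16|9<<8|0 and 11.0.0 becomes
	// 11<<16|0<<8|0, matching the amd64 and arm64 minimums above.
	func machoPackVersion(major, minor, patch uint32) uint32 {
		return major<<16 | minor<<8 | patch
	}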
+ // Un-export runtime symbols from plugins. Since the runtime + // is included in both the main binary and each plugin, these + // symbols appear in both images. If we leave them exported in + // the plugin, then the dynamic linker will resolve + // relocations to these functions in the plugin's functab to + // point to the main image, causing the runtime to think the + // plugin's functab is corrupted. By unexporting them, these + // become static references, which are resolved to the + // plugin's text. + // + // It would be better to omit the runtime from plugins. (Using + // relative PCs in the functab instead of relocations would + // also address this.) + // + // See issue #18190. if ctxt.BuildMode == BuildModePlugin { for _, name := range []string{"_cgo_topofstack", "__cgo_topofstack", "_cgo_panic", "crosscall2"} { - s := ctxt.loader.Lookup(name, 0) + // Most of these are data symbols or C + // symbols, so they have symbol version 0. + ver := 0 + // _cgo_panic is a Go function, so it uses ABIInternal. + if name == "_cgo_panic" { + ver = sym.ABIToVersion(obj.ABIInternal) + } + s := ctxt.loader.Lookup(name, ver) if s != 0 { ctxt.loader.SetAttrCgoExportDynamic(s, false) } @@ -925,12 +948,12 @@ func collectmachosyms(ctxt *Link) { if machoPlatform == PLATFORM_MACOS { switch n := ldr.SymExtname(s); n { case "fdopendir": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "amd64": ldr.SetSymExtname(s, n+"$INODE64") } case "readdir_r", "getfsstat": - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "amd64": ldr.SetSymExtname(s, n+"$INODE64") } @@ -1022,7 +1045,10 @@ func machosymtab(ctxt *Link) { symstr.AddUint8('_') // replace "·" as ".", because DTrace cannot handle it. - symstr.Addstring(strings.Replace(ldr.SymExtname(s), "·", ".", -1)) + name := strings.Replace(ldr.SymExtname(s), "·", ".", -1) + + name = mangleABIName(ctxt, ldr, s, name) + symstr.Addstring(name) if t := ldr.SymType(s); t == sym.SDYNIMPORT || t == sym.SHOSTOBJ || t == sym.SUNDEFEXT { symtab.AddUint8(0x01) // type N_EXT, external symbol @@ -1215,7 +1241,11 @@ func machoEmitReloc(ctxt *Link) { relocSect(ctxt, Segtext.Sections[0], ctxt.Textp) for _, sect := range Segtext.Sections[1:] { - relocSect(ctxt, sect, ctxt.datap) + if sect.Name == ".text" { + relocSect(ctxt, sect, ctxt.Textp) + } else { + relocSect(ctxt, sect, ctxt.datap) + } } for _, sect := range Segrelrodata.Sections { relocSect(ctxt, sect, ctxt.datap) diff --git a/src/cmd/link/internal/ld/macho_combine_dwarf.go b/src/cmd/link/internal/ld/macho_combine_dwarf.go index 77ee8a4d62b..2ab7da967a2 100644 --- a/src/cmd/link/internal/ld/macho_combine_dwarf.go +++ b/src/cmd/link/internal/ld/macho_combine_dwarf.go @@ -222,11 +222,19 @@ func machoCombineDwarf(ctxt *Link, exef *os.File, exem *macho.File, dsym, outexe err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &macho.SymtabCmd{}, "Symoff", "Stroff") case macho.LoadCmdDysymtab: err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &macho.DysymtabCmd{}, "Tocoffset", "Modtaboff", "Extrefsymoff", "Indirectsymoff", "Extreloff", "Locreloff") - case LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, LC_DYLIB_CODE_SIGN_DRS: + case LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, LC_DYLIB_CODE_SIGN_DRS, + LC_DYLD_EXPORTS_TRIE, LC_DYLD_CHAINED_FIXUPS: err = machoUpdateLoadCommand(reader, linkseg, linkoffset, &linkEditDataCmd{}, "DataOff") case LC_ENCRYPTION_INFO, LC_ENCRYPTION_INFO_64: err = machoUpdateLoadCommand(reader, linkseg, linkoffset, 
&encryptionInfoCmd{}, "CryptOff") - case macho.LoadCmdDylib, macho.LoadCmdThread, macho.LoadCmdUnixThread, LC_PREBOUND_DYLIB, LC_UUID, LC_VERSION_MIN_MACOSX, LC_VERSION_MIN_IPHONEOS, LC_SOURCE_VERSION, LC_MAIN, LC_LOAD_DYLINKER, LC_LOAD_WEAK_DYLIB, LC_REEXPORT_DYLIB, LC_RPATH, LC_ID_DYLIB, LC_SYMSEG, LC_LOADFVMLIB, LC_IDFVMLIB, LC_IDENT, LC_FVMFILE, LC_PREPAGE, LC_ID_DYLINKER, LC_ROUTINES, LC_SUB_FRAMEWORK, LC_SUB_UMBRELLA, LC_SUB_CLIENT, LC_SUB_LIBRARY, LC_TWOLEVEL_HINTS, LC_PREBIND_CKSUM, LC_ROUTINES_64, LC_LAZY_LOAD_DYLIB, LC_LOAD_UPWARD_DYLIB, LC_DYLD_ENVIRONMENT, LC_LINKER_OPTION, LC_LINKER_OPTIMIZATION_HINT, LC_VERSION_MIN_TVOS, LC_VERSION_MIN_WATCHOS, LC_VERSION_NOTE, LC_BUILD_VERSION: + case macho.LoadCmdDylib, macho.LoadCmdThread, macho.LoadCmdUnixThread, + LC_PREBOUND_DYLIB, LC_UUID, LC_VERSION_MIN_MACOSX, LC_VERSION_MIN_IPHONEOS, LC_SOURCE_VERSION, + LC_MAIN, LC_LOAD_DYLINKER, LC_LOAD_WEAK_DYLIB, LC_REEXPORT_DYLIB, LC_RPATH, LC_ID_DYLIB, + LC_SYMSEG, LC_LOADFVMLIB, LC_IDFVMLIB, LC_IDENT, LC_FVMFILE, LC_PREPAGE, LC_ID_DYLINKER, + LC_ROUTINES, LC_SUB_FRAMEWORK, LC_SUB_UMBRELLA, LC_SUB_CLIENT, LC_SUB_LIBRARY, LC_TWOLEVEL_HINTS, + LC_PREBIND_CKSUM, LC_ROUTINES_64, LC_LAZY_LOAD_DYLIB, LC_LOAD_UPWARD_DYLIB, LC_DYLD_ENVIRONMENT, + LC_LINKER_OPTION, LC_LINKER_OPTIMIZATION_HINT, LC_VERSION_MIN_TVOS, LC_VERSION_MIN_WATCHOS, + LC_VERSION_NOTE, LC_BUILD_VERSION: // Nothing to update default: err = fmt.Errorf("unknown load command 0x%x (%s)", int(cmd.Cmd), cmd.Cmd) @@ -394,7 +402,7 @@ func machoUpdateDwarfHeader(r *loadCmdReader, compressedSects []*macho.Section, // We want the DWARF segment to be considered non-loadable, so // force vmaddr and vmsize to zero. In addition, set the initial // protection to zero so as to make the dynamic loader happy, - // since otherwise it may complain that that the vm size and file + // since otherwise it may complain that the vm size and file // size don't match for the segment. See issues 21647 and 32673 // for more context. 
Also useful to refer to the Apple dynamic // loader source, specifically ImageLoaderMachO::sniffLoadCommands diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 68dee185987..adb39d06076 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -37,6 +37,7 @@ import ( "cmd/internal/sys" "cmd/link/internal/benchmark" "flag" + "internal/buildcfg" "log" "os" "runtime" @@ -87,7 +88,7 @@ var ( flag8 bool // use 64-bit addresses in symbol table flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines") - FlagDebugTextSize = flag.Int("debugppc64textsize", 0, "debug PPC64 text section max") + FlagDebugTextSize = flag.Int("debugtextsize", 0, "debug text section max size") FlagStrictDups = flag.Int("strictdups", 0, "sanity check duplicate symbol contents during object file reading (1=warn 2=err).") FlagRound = flag.Int("R", -1, "set address rounding `quantum`") FlagTextAddr = flag.Int64("T", -1, "set text segment `address`") @@ -95,7 +96,6 @@ var ( cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") memprofile = flag.String("memprofile", "", "write memory profile to `file`") memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`") - flagAbiWrap = flag.Bool("abiwrap", objabi.Regabi_enabled != 0, "support ABI wrapper functions") benchmarkFlag = flag.String("benchmark", "", "set to 'mem' or 'cpu' to enable phase benchmarking") benchmarkFileFlag = flag.String("benchmarkprofile", "", "emit phase profiles to `base`_phase.{cpu,mem}prof") ) @@ -117,10 +117,16 @@ func Main(arch *sys.Arch, theArch Arch) { final := gorootFinal() addstrdata1(ctxt, "runtime.defaultGOROOT="+final) - addstrdata1(ctxt, "cmd/internal/objabi.defaultGOROOT="+final) + addstrdata1(ctxt, "internal/buildcfg.defaultGOROOT="+final) + + buildVersion := buildcfg.Version + if goexperiment := buildcfg.GOEXPERIMENT(); goexperiment != "" { + buildVersion += " X:" + goexperiment + } + addstrdata1(ctxt, "runtime.buildVersion="+buildVersion) // TODO(matloob): define these above and then check flag values here - if ctxt.Arch.Family == sys.AMD64 && objabi.GOOS == "plan9" { + if ctxt.Arch.Family == sys.AMD64 && buildcfg.GOOS == "plan9" { flag.BoolVar(&flag8, "8", false, "use 64-bit addresses in symbol table") } flagHeadType := flag.String("H", "", "set header `type`") @@ -154,7 +160,7 @@ func Main(arch *sys.Arch, theArch Arch) { } } if ctxt.HeadType == objabi.Hunknown { - ctxt.HeadType.Set(objabi.GOOS) + ctxt.HeadType.Set(buildcfg.GOOS) } if !*flagAslr && ctxt.BuildMode != BuildModeCShared { @@ -250,7 +256,7 @@ func Main(arch *sys.Arch, theArch Arch) { bench.Start("dostrdata") ctxt.dostrdata() - if objabi.Fieldtrack_enabled != 0 { + if buildcfg.Experiment.FieldTrack { bench.Start("fieldtrack") fieldtrack(ctxt.Arch, ctxt.loader) } @@ -289,7 +295,6 @@ func Main(arch *sys.Arch, theArch Arch) { bench.Start("textbuildid") ctxt.textbuildid() bench.Start("addexport") - setupdynexp(ctxt) ctxt.setArchSyms() ctxt.addexport() bench.Start("Gentext") diff --git a/src/cmd/link/internal/ld/nooptcgolink_test.go b/src/cmd/link/internal/ld/nooptcgolink_test.go index 4d2ff1acf22..73548dabd4f 100644 --- a/src/cmd/link/internal/ld/nooptcgolink_test.go +++ b/src/cmd/link/internal/ld/nooptcgolink_test.go @@ -6,8 +6,6 @@ package ld import ( "internal/testenv" - "io/ioutil" - "os" "os/exec" "path/filepath" "runtime" @@ -22,11 +20,7 @@ func TestNooptCgoBuild(t *testing.T) { 
testenv.MustHaveGoBuild(t) testenv.MustHaveCGO(t) - dir, err := ioutil.TempDir("", "go-build") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=-N -l", "-o", filepath.Join(dir, "a.out")) cmd.Dir = filepath.Join(runtime.GOROOT(), "src", "runtime", "testdata", "testprogcgo") out, err := cmd.CombinedOutput() diff --git a/src/cmd/link/internal/ld/outbuf_mmap.go b/src/cmd/link/internal/ld/outbuf_mmap.go index 807fe243752..40a3222788e 100644 --- a/src/cmd/link/internal/ld/outbuf_mmap.go +++ b/src/cmd/link/internal/ld/outbuf_mmap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd // +build aix darwin dragonfly freebsd linux netbsd openbsd package ld diff --git a/src/cmd/link/internal/ld/outbuf_nofallocate.go b/src/cmd/link/internal/ld/outbuf_nofallocate.go index 6bf96bcb2ba..6564bd54a3d 100644 --- a/src/cmd/link/internal/ld/outbuf_nofallocate.go +++ b/src/cmd/link/internal/ld/outbuf_nofallocate.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !darwin && !linux // +build !darwin,!linux package ld diff --git a/src/cmd/link/internal/ld/outbuf_nommap.go b/src/cmd/link/internal/ld/outbuf_nommap.go index 6b4025384b4..c870fa2c182 100644 --- a/src/cmd/link/internal/ld/outbuf_nommap.go +++ b/src/cmd/link/internal/ld/outbuf_nommap.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !windows // +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!windows package ld diff --git a/src/cmd/link/internal/ld/outbuf_notdarwin.go b/src/cmd/link/internal/ld/outbuf_notdarwin.go index 8c5666f216d..f9caa413e3c 100644 --- a/src/cmd/link/internal/ld/outbuf_notdarwin.go +++ b/src/cmd/link/internal/ld/outbuf_notdarwin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !darwin // +build !darwin package ld diff --git a/src/cmd/link/internal/ld/outbuf_test.go b/src/cmd/link/internal/ld/outbuf_test.go index e6643da396e..a7b105f887a 100644 --- a/src/cmd/link/internal/ld/outbuf_test.go +++ b/src/cmd/link/internal/ld/outbuf_test.go @@ -5,8 +5,6 @@ package ld import ( - "io/ioutil" - "os" "path/filepath" "runtime" "testing" @@ -19,11 +17,7 @@ func TestMMap(t *testing.T) { t.Skip("unsupported OS") case "aix", "darwin", "ios", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "windows": } - dir, err := ioutil.TempDir("", "TestMMap") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() filename := filepath.Join(dir, "foo.out") ob := NewOutBuf(nil) if err := ob.Open(filename); err != nil { diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index fb733117be4..05fd3023694 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -11,6 +11,7 @@ import ( "cmd/link/internal/loader" "cmd/link/internal/sym" "fmt" + "internal/buildcfg" "os" "path/filepath" ) @@ -50,7 +51,7 @@ type pclntab struct { } // addGeneratedSym adds a generator symbol to pclntab, returning the new Sym. -// It is the caller's responsibilty to save they symbol in state. 
+// It is the caller's responsibility to save the symbol in state. func (state *pclntab) addGeneratedSym(ctxt *Link, name string, size int64, f generatorFunc) loader.Sym { size = Rnd(size, int64(ctxt.Arch.PtrSize)) state.size += size @@ -360,7 +361,7 @@ func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.Compilat // then not loading extra filenames), and just use the hash value of the // symbol name to do this cataloging. // - // TOOD: Store filenames as symbols. (Note this would be easiest if you + // TODO: Store filenames as symbols. (Note this would be easiest if you // also move strings to ALWAYS using the larger content addressable hash // function, and use that hash value for uniqueness testing.) cuEntries := make([]goobj.CUFileIndex, len(compUnits)) @@ -589,6 +590,7 @@ func (state *pclntab) generateFunctab(ctxt *Link, funcs []loader.Sym, inlSyms ma if !useSymValue { // Generate relocations for funcdata when externally linking. state.writeFuncData(ctxt, sb, funcs, inlSyms, startLocations, setAddr, setUintNOP) + sb.SortRelocs() } } @@ -879,7 +881,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { } func gorootFinal() string { - root := objabi.GOROOT + root := buildcfg.GOROOT if final := os.Getenv("GOROOT_FINAL"); final != "" { root = final } diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 36c8e0da9a7..3540c07da10 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // PE (Portable Executable) file writing -// https://www.microsoft.com/whdc/system/platform/firmware/PECOFF.mspx +// https://docs.microsoft.com/en-us/windows/win32/debug/pe-format package ld @@ -15,6 +15,7 @@ import ( "debug/pe" "encoding/binary" "fmt" + "internal/buildcfg" "sort" "strconv" "strings" @@ -61,12 +62,37 @@ const ( IMAGE_SCN_CNT_CODE = 0x00000020 IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040 IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080 + IMAGE_SCN_LNK_OTHER = 0x00000100 + IMAGE_SCN_LNK_INFO = 0x00000200 + IMAGE_SCN_LNK_REMOVE = 0x00000800 + IMAGE_SCN_LNK_COMDAT = 0x00001000 + IMAGE_SCN_GPREL = 0x00008000 + IMAGE_SCN_MEM_PURGEABLE = 0x00020000 + IMAGE_SCN_MEM_16BIT = 0x00020000 + IMAGE_SCN_MEM_LOCKED = 0x00040000 + IMAGE_SCN_MEM_PRELOAD = 0x00080000 + IMAGE_SCN_ALIGN_1BYTES = 0x00100000 + IMAGE_SCN_ALIGN_2BYTES = 0x00200000 + IMAGE_SCN_ALIGN_4BYTES = 0x00300000 + IMAGE_SCN_ALIGN_8BYTES = 0x00400000 + IMAGE_SCN_ALIGN_16BYTES = 0x00500000 + IMAGE_SCN_ALIGN_32BYTES = 0x00600000 + IMAGE_SCN_ALIGN_64BYTES = 0x00700000 + IMAGE_SCN_ALIGN_128BYTES = 0x00800000 + IMAGE_SCN_ALIGN_256BYTES = 0x00900000 + IMAGE_SCN_ALIGN_512BYTES = 0x00A00000 + IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000 + IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000 + IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000 + IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000 + IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000 + IMAGE_SCN_MEM_DISCARDABLE = 0x02000000 + IMAGE_SCN_MEM_NOT_CACHED = 0x04000000 + IMAGE_SCN_MEM_NOT_PAGED = 0x08000000 + IMAGE_SCN_MEM_SHARED = 0x10000000 IMAGE_SCN_MEM_EXECUTE = 0x20000000 IMAGE_SCN_MEM_READ = 0x40000000 IMAGE_SCN_MEM_WRITE = 0x80000000 - IMAGE_SCN_MEM_DISCARDABLE = 0x2000000 - IMAGE_SCN_LNK_NRELOC_OVFL = 0x1000000 - IMAGE_SCN_ALIGN_32BYTES = 0x600000 ) // See https://docs.microsoft.com/en-us/windows/win32/debug/pe-format.
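The block of IMAGE_SCN_ALIGN_* constants added above follows COFF's encoding: bits 20-23 of the section characteristics hold log2(alignment)+1. A sketch of the relationship (the helper is invented; the linker uses the named constants directly):

	// peAlignFlag maps a power-of-two section alignment to its
	// IMAGE_SCN_ALIGN_* bits: 1 byte -> 0x00100000, 8 bytes -> 0x00400000,
	// and so on up to 8192 bytes -> 0x00E00000.
	func peAlignFlag(align uint32) uint32 {
		n := uint32(1)
		for 1<<(n-1) < align {
			n++
		}
		return n << 20
	}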
@@ -418,14 +444,16 @@ func (f *peFile) addSection(name string, sectsize int, filesize int) *peSection name: name, shortName: name, index: len(f.sections) + 1, - virtualSize: uint32(sectsize), virtualAddress: f.nextSectOffset, pointerToRawData: f.nextFileOffset, } f.nextSectOffset = uint32(Rnd(int64(f.nextSectOffset)+int64(sectsize), PESECTALIGN)) if filesize > 0 { + sect.virtualSize = uint32(sectsize) sect.sizeOfRawData = uint32(Rnd(int64(filesize), PEFILEALIGN)) f.nextFileOffset += sect.sizeOfRawData + } else { + sect.sizeOfRawData = uint32(sectsize) } f.sections = append(f.sections, sect) return sect @@ -447,7 +475,7 @@ func (f *peFile) addDWARFSection(name string, size int) *peSection { off := f.stringTable.add(name) h := f.addSection(name, size, size) h.shortName = fmt.Sprintf("/%d", off) - h.characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE + h.characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE | IMAGE_SCN_CNT_INITIALIZED_DATA return h } @@ -476,27 +504,26 @@ func (f *peFile) addInitArray(ctxt *Link) *peSection { // However, the entire Go runtime is initialized from just one function, so it is unlikely // that this will need to grow in the future. var size int - switch objabi.GOARCH { + var alignment uint32 + switch buildcfg.GOARCH { default: - Exitf("peFile.addInitArray: unsupported GOARCH=%q\n", objabi.GOARCH) - case "386": + Exitf("peFile.addInitArray: unsupported GOARCH=%q\n", buildcfg.GOARCH) + case "386", "arm": size = 4 - case "amd64": - size = 8 - case "arm": - size = 4 - case "arm64": + alignment = IMAGE_SCN_ALIGN_4BYTES + case "amd64", "arm64": size = 8 + alignment = IMAGE_SCN_ALIGN_8BYTES } sect := f.addSection(".ctors", size, size) - sect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ + sect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | alignment sect.sizeOfRawData = uint32(size) ctxt.Out.SeekSet(int64(sect.pointerToRawData)) sect.checkOffset(ctxt.Out.Offset()) init_entry := ctxt.loader.Lookup(*flagEntrySymbol, 0) addr := uint64(ctxt.loader.SymValue(init_entry)) - ctxt.loader.SymSect(init_entry).Vaddr - switch objabi.GOARCH { + switch buildcfg.GOARCH { case "386", "arm": ctxt.Out.Write32(uint32(addr)) case "amd64", "arm64": @@ -520,7 +547,6 @@ func (f *peFile) emitRelocations(ctxt *Link) { if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { return 0 } - nrelocs := 0 sect.Reloff = uint64(ctxt.Out.Offset()) for i, s := range syms { if !ldr.AttrReachable(s) { @@ -531,12 +557,12 @@ func (f *peFile) emitRelocations(ctxt *Link) { break } } - eaddr := int32(sect.Vaddr + sect.Length) + eaddr := int64(sect.Vaddr + sect.Length) for _, s := range syms { if !ldr.AttrReachable(s) { continue } - if ldr.SymValue(s) >= int64(eaddr) { + if ldr.SymValue(s) >= eaddr { break } // Compute external relocations on the go, and pass to PEreloc1 @@ -556,13 +582,13 @@ func (f *peFile) emitRelocations(ctxt *Link) { ctxt.Errorf(s, "reloc %d to non-coff symbol %s (outer=%s) %d", r.Type(), ldr.SymName(r.Sym()), ldr.SymName(rr.Xsym), ldr.SymType(r.Sym())) } if !thearch.PEreloc1(ctxt.Arch, ctxt.Out, ldr, s, rr, int64(uint64(ldr.SymValue(s)+int64(r.Off()))-base)) { - ctxt.Errorf(s, "unsupported obj reloc %d/%d to %s", r.Type(), r.Siz(), ldr.SymName(r.Sym())) + ctxt.Errorf(s, "unsupported obj reloc %v/%d to %s", r.Type(), r.Siz(), ldr.SymName(r.Sym())) } - nrelocs++ } } sect.Rellen = uint64(ctxt.Out.Offset()) - sect.Reloff - return nrelocs + const relocLen = 4 + 4 + 2 + return int(sect.Rellen / relocLen) } 
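The relocLen constant above is the on-disk size of a COFF relocation record, so dividing the emitted byte count Rellen by 10 recovers the record count that the removed nrelocs variable used to track by hand. The record layout, for reference (the struct is illustrative; the linker writes the fields directly):

	// A COFF section relocation is 10 bytes, matching relocLen = 4 + 4 + 2.
	type peRelocRecord struct {
		VirtualAddress   uint32 // address of the location to patch
		SymbolTableIndex uint32 // index into the COFF symbol table
		Type             uint16 // machine-specific relocation type
	}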
sects := []struct { @@ -603,13 +629,17 @@ dwarfLoop: Errorf(nil, "emitRelocations: could not find %q section", sect.Name) } + if f.ctorsSect == nil { + return + } + f.ctorsSect.emitRelocations(ctxt.Out, func() int { dottext := ldr.Lookup(".text", 0) ctxt.Out.Write32(0) ctxt.Out.Write32(uint32(ldr.SymDynid(dottext))) - switch objabi.GOARCH { + switch buildcfg.GOARCH { default: - ctxt.Errorf(dottext, "unknown architecture for PE: %q\n", objabi.GOARCH) + ctxt.Errorf(dottext, "unknown architecture for PE: %q\n", buildcfg.GOARCH) case "386": ctxt.Out.Write16(IMAGE_REL_I386_DIR32) case "amd64": @@ -674,6 +704,12 @@ func (f *peFile) mapToPESection(ldr *loader.Loader, s loader.Sym, linkmode LinkM return f.bssSect.index, int64(v - Segdata.Filelen), nil } +var isLabel = make(map[loader.Sym]bool) + +func AddPELabelSym(ldr *loader.Loader, s loader.Sym) { + isLabel[s] = true +} + // writeSymbols writes all COFF symbol table records. func (f *peFile) writeSymbols(ctxt *Link) { ldr := ctxt.loader @@ -691,6 +727,8 @@ func (f *peFile) writeSymbols(ctxt *Link) { name = "_" + name } + name = mangleABIName(ctxt, ldr, s, name) + var peSymType uint16 if ctxt.IsExternal() { peSymType = IMAGE_SYM_TYPE_NULL @@ -768,6 +806,10 @@ func (f *peFile) writeSymbols(ctxt *Link) { switch t { case sym.SDYNIMPORT, sym.SHOSTOBJ, sym.SUNDEFEXT: addsym(s) + default: + if len(isLabel) > 0 && isLabel[s] { + addsym(s) + } } } } @@ -942,9 +984,7 @@ func (f *peFile) writeOptionalHeader(ctxt *Link) { // calls that may need more stack than we think. // // The default stack reserve size directly affects only the main - // thread, ctrlhandler thread, and profileloop thread. For - // these, it must be greater than the stack size assumed by - // externalthreadhandler. + // thread. // // For other threads, the runtime explicitly asks the kernel // to use the default stack size so that all stacks are @@ -1589,7 +1629,7 @@ func addpersrc(ctxt *Link) { func asmbPe(ctxt *Link) { t := pefile.addSection(".text", int(Segtext.Length), int(Segtext.Length)) - t.characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ + t.characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ if ctxt.LinkMode == LinkExternal { // some data symbols (e.g. masks) end up in the .text section, and they normally // expect larger alignment requirement than the default text section alignment. 
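AddPELabelSym above gives architecture backends a way to force label symbols into the COFF symbol table, which writeSymbols otherwise reserves for SDYNIMPORT, SHOSTOBJ, and SUNDEFEXT symbols. A hedged usage sketch (the symbol name is invented):

	// In an arch backend's setup code (sketch):
	func markPELabel(ldr *loader.Loader) {
		lbl := ldr.LookupOrCreateSym("local.pe_label", 0)
		AddPELabelSym(ldr, lbl)
		// writeSymbols will now emit lbl, so external relocations can
		// reference it by symbol table index.
	}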
diff --git a/src/cmd/link/internal/ld/sym.go b/src/cmd/link/internal/ld/sym.go index 75489720cc7..72639962e2f 100644 --- a/src/cmd/link/internal/ld/sym.go +++ b/src/cmd/link/internal/ld/sym.go @@ -36,6 +36,7 @@ import ( "cmd/internal/sys" "cmd/link/internal/loader" "cmd/link/internal/sym" + "internal/buildcfg" "log" "runtime" ) @@ -53,8 +54,8 @@ func linknew(arch *sys.Arch) *Link { generatorSyms: make(map[loader.Sym]generatorFunc), } - if objabi.GOARCH != arch.Name { - log.Fatalf("invalid objabi.GOARCH %s (want %s)", objabi.GOARCH, arch.Name) + if buildcfg.GOARCH != arch.Name { - log.Fatalf("invalid buildcfg.GOARCH %s (want %s)", buildcfg.GOARCH, arch.Name) } AtExit(func() { diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 85a8ff42ad0..00f557875a9 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -31,11 +31,13 @@ package ld import ( + "cmd/internal/obj" "cmd/internal/objabi" "cmd/link/internal/loader" "cmd/link/internal/sym" "debug/elf" "fmt" + "internal/buildcfg" "path/filepath" "strings" ) @@ -103,44 +105,14 @@ func putelfsym(ctxt *Link, x loader.Sym, typ elf.SymType, curbind elf.SymBind) { } sname := ldr.SymExtname(x) - - // For functions with ABI wrappers, we have to make sure that we - // don't wind up with two elf symbol table entries with the same - // name (since this will generated an error from the external - // linker). In the CgoExportStatic case, we want the ABI0 symbol - // to have the primary symbol table entry (since it's going to be - // called from C), so we rename the ABIInternal symbol. In all - // other cases, we rename the ABI0 symbol, since we want - // cross-load-module calls to target ABIInternal. - // - // TODO: generalize this for non-ELF (put the rename code in the - // loader, and store the rename result in SymExtname). - // - // TODO: avoid the ldr.Lookup calls below by instead using an aux - // sym or marker relocation to associate the wrapper with the - // wrapped function. - // - if *flagAbiWrap { - if !ldr.IsExternal(x) && ldr.SymType(x) == sym.STEXT { - // First case - if ldr.SymVersion(x) == sym.SymVerABIInternal { - if s2 := ldr.Lookup(sname, sym.SymVerABI0); s2 != 0 && ldr.AttrCgoExportStatic(s2) && ldr.SymType(s2) == sym.STEXT { - sname = sname + ".abiinternal" - } - } - // Second case - if ldr.SymVersion(x) == sym.SymVerABI0 && !ldr.AttrCgoExportStatic(x) { - if s2 := ldr.Lookup(sname, sym.SymVerABIInternal); s2 != 0 && ldr.SymType(s2) == sym.STEXT { - sname = sname + ".abi0" - } - } - } - } + sname = mangleABIName(ctxt, ldr, x, sname) // One pass for each binding: elf.STB_LOCAL, elf.STB_GLOBAL, // maybe one day elf.STB_WEAK. bind := elf.STB_GLOBAL - if ldr.IsFileLocal(x) || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) { + if ldr.IsFileLocal(x) && !isStaticTmp(sname) || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) { + // Static tmp is package local, but a package can be shared among multiple DSOs. + // They need to have a single view of the static tmp, which is writable.
bind = elf.STB_LOCAL } @@ -613,7 +585,9 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { strings.HasPrefix(name, "gclocals."), strings.HasPrefix(name, "gclocals·"), ldr.SymType(s) == sym.SGOFUNC && s != symgofunc, - strings.HasSuffix(name, ".opendefer"): + strings.HasSuffix(name, ".opendefer"), + strings.HasSuffix(name, ".arginfo0"), + strings.HasSuffix(name, ".arginfo1"): symGroupType[s] = sym.SGOFUNC ldr.SetAttrNotInSymbolTable(s, true) ldr.SetCarrierSym(s, symgofunc) @@ -856,3 +830,43 @@ func setCarrierSize(typ sym.SymKind, sz int64) { } CarrierSymByType[typ].Size = sz } + +func isStaticTmp(name string) bool { + return strings.Contains(name, "."+obj.StaticNamePref) +} + +// mangleABIName mangles the function name with ABI information. +func mangleABIName(ctxt *Link, ldr *loader.Loader, x loader.Sym, name string) string { + // For functions with ABI wrappers, we have to make sure that we + // don't wind up with two symbol table entries with the same + // name (since this will generate an error from the external + // linker). If we have wrappers, keep the ABIInternal name + // unmangled since we want cross-load-module calls to target + // ABIInternal, and rename other symbols. + // + // TODO: avoid the ldr.Lookup calls below by instead using an aux + // sym or marker relocation to associate the wrapper with the + // wrapped function. + if !buildcfg.Experiment.RegabiWrappers { + return name + } + + if !ldr.IsExternal(x) && ldr.SymType(x) == sym.STEXT && ldr.SymVersion(x) != sym.SymVerABIInternal { + if s2 := ldr.Lookup(name, sym.SymVerABIInternal); s2 != 0 && ldr.SymType(s2) == sym.STEXT { + name = fmt.Sprintf("%s.abi%d", name, ldr.SymVersion(x)) + } + } + + // When loading a shared library, if a symbol has only one ABI, + // and the name is not mangled, we don't know what ABI it is. + // So we always mangle ABIInternal function names in shared linkage, + // except symbols that are exported to C. Type symbols are always + // ABIInternal so they are not mangled.
+ if ctxt.IsShared() { + if ldr.SymType(x) == sym.STEXT && ldr.SymVersion(x) == sym.SymVerABIInternal && !ldr.AttrCgoExport(x) && !strings.HasPrefix(name, "type.") { + name = fmt.Sprintf("%s.abiinternal", name) + } + } + + return name +} diff --git a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod3.go b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod3.go index 9a8dfbce5fa..37c89374cbe 100644 --- a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod3.go +++ b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod3.go @@ -14,7 +14,7 @@ type S int func (s S) M() { println("S.M") } -type I interface { M() } +type I interface{ M() } type T float64 diff --git a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go index 52ee2e3d860..4af47ad1fa5 100644 --- a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go +++ b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go @@ -10,6 +10,7 @@ package main type T int +//go:noinline func (T) M() {} type I interface{ M() } @@ -20,4 +21,5 @@ var pp *I func main() { p = new(T) // use type T pp = new(I) // use type I + *pp = *p // convert T to I, build itab } diff --git a/src/cmd/link/internal/ld/testdata/issue42484/main.go b/src/cmd/link/internal/ld/testdata/issue42484/main.go new file mode 100644 index 00000000000..60fc110ffaa --- /dev/null +++ b/src/cmd/link/internal/ld/testdata/issue42484/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "fmt" +) + +func main() { + a := 0 + a++ + b := 0 + f1(a, b) +} + +func f1(a, b int) { + fmt.Printf("%d %d\n", a, b) +} diff --git a/src/cmd/link/internal/ld/util.go b/src/cmd/link/internal/ld/util.go index 9228ed163d0..779f4988b68 100644 --- a/src/cmd/link/internal/ld/util.go +++ b/src/cmd/link/internal/ld/util.go @@ -57,7 +57,7 @@ func afterErrorAction() { // Logging an error means that on exit cmd/link will delete any // output file and return a non-zero error code. // -// TODO: remove. Use ctxt.Errof instead. +// TODO: remove. Use ctxt.Errorf instead. // All remaining calls use nil as first arg. func Errorf(dummy *int, format string, args ...interface{}) { format += "\n" diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go index ba818eaa961..12bd23f7e57 100644 --- a/src/cmd/link/internal/ld/xcoff.go +++ b/src/cmd/link/internal/ld/xcoff.go @@ -28,8 +28,11 @@ const ( // Total amount of space to reserve at the start of the file // for File Header, Auxiliary Header, and Section Headers. // May waste some. - XCOFFHDRRESERVE = FILHSZ_64 + AOUTHSZ_EXEC64 + SCNHSZ_64*23 - XCOFFSECTALIGN int64 = 32 // base on dump -o + XCOFFHDRRESERVE = FILHSZ_64 + AOUTHSZ_EXEC64 + SCNHSZ_64*23 + + // based on dump -o, then rounded from 32B to 64B to + // match the worst-case ELF text section alignment on ppc64. + XCOFFSECTALIGN int64 = 64 // XCOFF binaries should normally have all their sections position-independent. // However, this is not yet possible for .text because of some R_ADDR relocations @@ -555,11 +558,12 @@ func Xcoffinit(ctxt *Link) { // type records C_FILE information needed for genasmsym in XCOFF.
type xcoffSymSrcFile struct { - name string - file *XcoffSymEnt64 // Symbol of this C_FILE - csectAux *XcoffAuxCSect64 // Symbol for the current .csect - csectSymNb uint64 // Symbol number for the current .csect - csectSize int64 + name string + file *XcoffSymEnt64 // Symbol of this C_FILE + csectAux *XcoffAuxCSect64 // Symbol for the current .csect + csectSymNb uint64 // Symbol number for the current .csect + csectVAStart int64 + csectVAEnd int64 } var ( @@ -746,7 +750,8 @@ func (f *xcoffFile) writeSymbolNewFile(ctxt *Link, name string, firstEntry uint6 f.addSymbol(aux) currSymSrcFile.csectAux = aux - currSymSrcFile.csectSize = 0 + currSymSrcFile.csectVAStart = int64(firstEntry) + currSymSrcFile.csectVAEnd = int64(firstEntry) } // Update values for the previous package. @@ -768,8 +773,9 @@ func (f *xcoffFile) updatePreviousFile(ctxt *Link, last bool) { // update csect scnlen in this auxiliary entry aux := currSymSrcFile.csectAux - aux.Xscnlenlo = uint32(currSymSrcFile.csectSize & 0xFFFFFFFF) - aux.Xscnlenhi = uint32(currSymSrcFile.csectSize >> 32) + csectSize := currSymSrcFile.csectVAEnd - currSymSrcFile.csectVAStart + aux.Xscnlenlo = uint32(csectSize & 0xFFFFFFFF) + aux.Xscnlenhi = uint32(csectSize >> 32) } // Write symbol representing a .text function. @@ -825,15 +831,20 @@ func (f *xcoffFile) writeSymbolFunc(ctxt *Link, x loader.Sym) []xcoffSym { Nnumaux: 2, } - if ldr.SymVersion(x) != 0 || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) { + if ldr.IsFileLocal(x) || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) { s.Nsclass = C_HIDEXT } ldr.SetSymDynid(x, int32(xfile.symbolCount)) syms = append(syms, s) - // Update current csect size - currSymSrcFile.csectSize += ldr.SymSize(x) + // Keep track of the section size by tracking the VA range. Individual + // alignment differences may introduce a few extra bytes of padding + // which are not fully accounted for by ldr.SymSize(x). + sv := ldr.SymValue(x) + ldr.SymSize(x) + if currSymSrcFile.csectVAEnd < sv { + currSymSrcFile.csectVAEnd = sv + } // create auxiliary entries a2 := &XcoffAuxFcn64{ @@ -914,7 +925,7 @@ func putaixsym(ctxt *Link, x loader.Sym, t SymbolType) { Nnumaux: 1, } - if ldr.SymVersion(x) != 0 || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) { + if ldr.IsFileLocal(x) || ldr.AttrVisibilityHidden(x) || ldr.AttrLocal(x) { // There are more symbols in the case of global data, // which are related to the assembly generated // to access such symbols. @@ -1318,19 +1329,14 @@ func (ctxt *Link) doxcoff() { if !ldr.AttrCgoExport(s) { continue } - if ldr.SymVersion(s) != 0 { // sanity check - panic("cgo_export on non-version 0 symbol") + if ldr.IsFileLocal(s) { + panic("cgo_export on static symbol") } if ldr.SymType(s) == sym.STEXT || ldr.SymType(s) == sym.SABIALIAS { // On AIX, an exported function must have two symbols: // - a .text symbol which must start with a ".". // - a .data symbol which is a function descriptor. - // - // CgoExport attribute should only be set on a version 0 - // symbol, which can be TEXT or ABIALIAS. - // (before, setupdynexp copies the attribute from the - // alias to the aliased. Now we are before setupdynexp.)
name := ldr.SymExtname(s) ldr.SetSymExtname(s, "."+name) @@ -1554,7 +1560,7 @@ func (f *xcoffFile) writeFileHeader(ctxt *Link) { f.xahdr.Otoc = uint64(ldr.SymValue(toc)) f.xahdr.Osntoc = f.getXCOFFscnum(ldr.SymSect(toc)) - f.xahdr.Oalgntext = int16(logBase2(int(Funcalign))) + f.xahdr.Oalgntext = int16(logBase2(int(XCOFFSECTALIGN))) f.xahdr.Oalgndata = 0x5 binary.Write(ctxt.Out, binary.BigEndian, &f.xfhdr) @@ -1787,8 +1793,8 @@ func xcoffCreateExportFile(ctxt *Link) (fname string) { if !strings.HasPrefix(extname, "._cgoexp_") { continue } - if ldr.SymVersion(s) != 0 { - continue // Only export version 0 symbols. See the comment in doxcoff. + if ldr.IsFileLocal(s) { + continue // Only export non-static symbols } // Retrieve the name of the initial symbol diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go index c698874b325..c6956297f6c 100644 --- a/src/cmd/link/internal/loadelf/ldelf.go +++ b/src/cmd/link/internal/loadelf/ldelf.go @@ -245,13 +245,13 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, newSym := func(name string, version int) loader.Sym { return l.CreateStaticSym(name) } - lookup := func(name string, version int) loader.Sym { - return l.LookupOrCreateSym(name, version) - } + lookup := l.LookupOrCreateCgoExport errorf := func(str string, args ...interface{}) ([]loader.Sym, uint32, error) { return nil, 0, fmt.Errorf("loadelf: %s: %v", pn, fmt.Sprintf(str, args...)) } + ehdrFlags = initEhdrFlags + base := f.Offset() var hdrbuf [64]byte @@ -753,7 +753,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, } rType := objabi.ElfRelocOffset + objabi.RelocType(relocType) - rSize, err := relSize(arch, pn, uint32(relocType)) + rSize, addendSize, err := relSize(arch, pn, uint32(relocType)) if err != nil { return nil, 0, err } @@ -770,10 +770,10 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, } } - if rSize == 2 { + if addendSize == 2 { rAdd = int64(int16(rAdd)) } - if rSize == 4 { + if addendSize == 4 { rAdd = int64(int32(rAdd)) } @@ -945,7 +945,10 @@ func readelfsym(newSym, lookup func(string, int) loader.Sym, l *loader.Loader, a return nil } -func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, error) { +// Return the size of the relocated field, and the size of the addend as the first +// and second values. Note, the addend may be larger than the relocation field in +// some cases when a relocated value is split across multiple relocations. +func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, uint8, error) { // TODO(mdempsky): Replace this with a struct-valued switch statement // once golang.org/issue/15164 is fixed or found to not impair cmd/link // performance. 
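The new relSize signature above separates the width of the relocated field from the width of the addend stored in the section data: relocations that split a value across instructions (the PPC64 TOC16_*/REL16_* cases, which now return 2, 4) patch a 2-byte field while carrying a 4-byte addend. A sketch of how a caller then narrows the raw addend it read from the section bytes, assuming little-endian data and an invented helper name:

package main

import (
	"encoding/binary"
	"fmt"
)

// signExtendAddend mirrors the loader's use of the second relSize result:
// a 2- or 4-byte addend is reinterpreted as a signed value of that width,
// regardless of how wide the relocated field itself is.
func signExtendAddend(raw int64, addendSize uint8) int64 {
	switch addendSize {
	case 2:
		return int64(int16(raw))
	case 4:
		return int64(int32(raw))
	}
	return raw
}

func main() {
	// 0xFFFC read as a 2-byte addend means -4, not 65532.
	b := []byte{0xFC, 0xFF, 0, 0, 0, 0, 0, 0}
	raw := int64(binary.LittleEndian.Uint64(b))
	fmt.Println(signExtendAddend(raw, 2)) // -4
	fmt.Println(signExtendAddend(raw, 4)) // 65532
}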
@@ -964,7 +967,7 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, error) { switch uint32(arch.Family) | elftype<<16 { default: - return 0, fmt.Errorf("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype) + return 0, 0, fmt.Errorf("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype) case MIPS | uint32(elf.R_MIPS_HI16)<<16, MIPS | uint32(elf.R_MIPS_LO16)<<16, @@ -983,27 +986,23 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, error) { MIPS64 | uint32(elf.R_MIPS_GPREL16)<<16, MIPS64 | uint32(elf.R_MIPS_GOT_PAGE)<<16, MIPS64 | uint32(elf.R_MIPS_JALR)<<16, - MIPS64 | uint32(elf.R_MIPS_GOT_OFST)<<16: - return 4, nil + MIPS64 | uint32(elf.R_MIPS_GOT_OFST)<<16, + MIPS64 | uint32(elf.R_MIPS_CALL16)<<16, + MIPS64 | uint32(elf.R_MIPS_GPREL32)<<16, + MIPS64 | uint32(elf.R_MIPS_64)<<16, + MIPS64 | uint32(elf.R_MIPS_GOT_DISP)<<16: + return 4, 4, nil case S390X | uint32(elf.R_390_8)<<16: - return 1, nil + return 1, 1, nil case PPC64 | uint32(elf.R_PPC64_TOC16)<<16, - PPC64 | uint32(elf.R_PPC64_TOC16_LO)<<16, - PPC64 | uint32(elf.R_PPC64_TOC16_HI)<<16, - PPC64 | uint32(elf.R_PPC64_TOC16_HA)<<16, - PPC64 | uint32(elf.R_PPC64_TOC16_DS)<<16, - PPC64 | uint32(elf.R_PPC64_TOC16_LO_DS)<<16, - PPC64 | uint32(elf.R_PPC64_REL16_LO)<<16, - PPC64 | uint32(elf.R_PPC64_REL16_HI)<<16, - PPC64 | uint32(elf.R_PPC64_REL16_HA)<<16, S390X | uint32(elf.R_390_16)<<16, S390X | uint32(elf.R_390_GOT16)<<16, S390X | uint32(elf.R_390_PC16)<<16, S390X | uint32(elf.R_390_PC16DBL)<<16, S390X | uint32(elf.R_390_PLT16DBL)<<16: - return 2, nil + return 2, 2, nil case ARM | uint32(elf.R_ARM_ABS32)<<16, ARM | uint32(elf.R_ARM_GOT32)<<16, @@ -1051,7 +1050,7 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, error) { S390X | uint32(elf.R_390_PLT32DBL)<<16, S390X | uint32(elf.R_390_GOTPCDBL)<<16, S390X | uint32(elf.R_390_GOTENT)<<16: - return 4, nil + return 4, 4, nil case AMD64 | uint32(elf.R_X86_64_64)<<16, AMD64 | uint32(elf.R_X86_64_PC64)<<16, @@ -1066,11 +1065,11 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, error) { S390X | uint32(elf.R_390_PC64)<<16, S390X | uint32(elf.R_390_GOT64)<<16, S390X | uint32(elf.R_390_PLT64)<<16: - return 8, nil + return 8, 8, nil case RISCV64 | uint32(elf.R_RISCV_RVC_BRANCH)<<16, RISCV64 | uint32(elf.R_RISCV_RVC_JUMP)<<16: - return 2, nil + return 2, 2, nil case RISCV64 | uint32(elf.R_RISCV_32)<<16, RISCV64 | uint32(elf.R_RISCV_BRANCH)<<16, @@ -1082,12 +1081,22 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, error) { RISCV64 | uint32(elf.R_RISCV_PCREL_LO12_I)<<16, RISCV64 | uint32(elf.R_RISCV_PCREL_LO12_S)<<16, RISCV64 | uint32(elf.R_RISCV_RELAX)<<16: - return 4, nil + return 4, 4, nil case RISCV64 | uint32(elf.R_RISCV_64)<<16, RISCV64 | uint32(elf.R_RISCV_CALL)<<16, RISCV64 | uint32(elf.R_RISCV_CALL_PLT)<<16: - return 8, nil + return 8, 8, nil + + case PPC64 | uint32(elf.R_PPC64_TOC16_LO)<<16, + PPC64 | uint32(elf.R_PPC64_TOC16_HI)<<16, + PPC64 | uint32(elf.R_PPC64_TOC16_HA)<<16, + PPC64 | uint32(elf.R_PPC64_TOC16_DS)<<16, + PPC64 | uint32(elf.R_PPC64_TOC16_LO_DS)<<16, + PPC64 | uint32(elf.R_PPC64_REL16_LO)<<16, + PPC64 | uint32(elf.R_PPC64_REL16_HI)<<16, + PPC64 | uint32(elf.R_PPC64_REL16_HA)<<16: + return 2, 4, nil } } diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 68dc3de2738..1b71a66c6f5 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -51,30 +51,14 @@ type Reloc struct { 
*goobj.Reloc r *oReader l *Loader - - // External reloc types may not fit into a uint8 which the Go object file uses. - // Store it here, instead of in the byte of goobj.Reloc. - // For Go symbols this will always be zero. - // goobj.Reloc.Type() + typ is always the right type, for both Go and external - // symbols. - typ objabi.RelocType } -func (rel Reloc) Type() objabi.RelocType { return objabi.RelocType(rel.Reloc.Type()) + rel.typ } -func (rel Reloc) Sym() Sym { return rel.l.resolve(rel.r, rel.Reloc.Sym()) } -func (rel Reloc) SetSym(s Sym) { rel.Reloc.SetSym(goobj.SymRef{PkgIdx: 0, SymIdx: uint32(s)}) } -func (rel Reloc) IsMarker() bool { return rel.Siz() == 0 } - -func (rel Reloc) SetType(t objabi.RelocType) { - if t != objabi.RelocType(uint8(t)) { - panic("SetType: type doesn't fit into Reloc") - } - rel.Reloc.SetType(uint8(t)) - if rel.typ != 0 { - // should use SymbolBuilder.SetRelocType - panic("wrong method to set reloc type") - } -} +func (rel Reloc) Type() objabi.RelocType { return objabi.RelocType(rel.Reloc.Type()) &^ objabi.R_WEAK } +func (rel Reloc) Weak() bool { return objabi.RelocType(rel.Reloc.Type())&objabi.R_WEAK != 0 } +func (rel Reloc) SetType(t objabi.RelocType) { rel.Reloc.SetType(uint16(t)) } +func (rel Reloc) Sym() Sym { return rel.l.resolve(rel.r, rel.Reloc.Sym()) } +func (rel Reloc) SetSym(s Sym) { rel.Reloc.SetSym(goobj.SymRef{PkgIdx: 0, SymIdx: uint32(s)}) } +func (rel Reloc) IsMarker() bool { return rel.Siz() == 0 } // Aux holds a "handle" to access an aux symbol record from an // object file. @@ -273,6 +257,9 @@ type Loader struct { // the symbol that triggered the marking of symbol K as live. Reachparent []Sym + // CgoExports records cgo-exported symbols by SymName. + CgoExports map[string]Sym + flags uint32 hasUnknownPkgPath bool // if any Go object has unknown package path @@ -307,15 +294,14 @@ type elfsetstringFunc func(str string, off int) // extSymPayload holds the payload (data + relocations) for linker-synthesized // external symbols (note that symbol value is stored in a separate slice). type extSymPayload struct { - name string // TODO: would this be better as offset into str table? - size int64 - ver int - kind sym.SymKind - objidx uint32 // index of original object if sym made by cloneToExternal - relocs []goobj.Reloc - reltypes []objabi.RelocType // relocation types - data []byte - auxs []goobj.Aux + name string // TODO: would this be better as offset into str table? + size int64 + ver int + kind sym.SymKind + objidx uint32 // index of original object if sym made by cloneToExternal + relocs []goobj.Reloc + data []byte + auxs []goobj.Aux } const ( @@ -485,14 +471,14 @@ func (st *loadState) addSym(name string, ver int, r *oReader, li uint32, kind in // new symbol overwrites old symbol. oldtyp := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())] if !(oldtyp.IsData() && oldr.DataSize(oldli) == 0) { - log.Fatalf("duplicated definition of symbol " + name) + log.Fatalf("duplicated definition of symbol %s, from %s and %s", name, r.unit.Lib.Pkg, oldr.unit.Lib.Pkg) } l.objSyms[oldi] = objSym{r.objidx, li} } else { // old symbol overwrites new symbol. 
typ := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())] if !typ.IsData() { // only allow overwriting data symbol - log.Fatalf("duplicated definition of symbol " + name) + log.Fatalf("duplicated definition of symbol %s, from %s and %s", name, r.unit.Lib.Pkg, oldr.unit.Lib.Pkg) } } return oldi @@ -531,6 +517,36 @@ func (l *Loader) LookupOrCreateSym(name string, ver int) Sym { return i } +// AddCgoExport records a cgo-exported symbol in l.CgoExports. +// This table is used to identify the correct Go symbol ABI to use +// to resolve references from host objects (which don't have ABIs). +func (l *Loader) AddCgoExport(s Sym) { + if l.CgoExports == nil { + l.CgoExports = make(map[string]Sym) + } + l.CgoExports[l.SymName(s)] = s +} + +// LookupOrCreateCgoExport is like LookupOrCreateSym, but if ver +// indicates a global symbol, it uses the CgoExport table to determine +// the appropriate symbol version (ABI) to use. ver must be either 0 +// or a static symbol version. +func (l *Loader) LookupOrCreateCgoExport(name string, ver int) Sym { + if ver >= sym.SymVerStatic { + return l.LookupOrCreateSym(name, ver) + } + if ver != 0 { + panic("ver must be 0 or a static version") + } + // Look for a cgo-exported symbol from Go. + if s, ok := l.CgoExports[name]; ok { + return s + } + // Otherwise, this must just be a symbol in the host object. + // Create a version 0 symbol for it. + return l.LookupOrCreateSym(name, 0) +} + func (l *Loader) IsExternal(i Sym) bool { r, _ := l.toLocal(i) return l.isExtReader(r) @@ -755,6 +771,9 @@ func (l *Loader) SymName(i Sym) string { return pp.name } r, li := l.toLocal(i) + if r == nil { + return "?" + } name := r.Sym(li).Name(r.Reader) if !r.NeedNameExpansion() { return name @@ -1427,7 +1446,7 @@ func (l *Loader) SetSymLocalElfSym(i Sym, es int32) { } } -// SymPlt returns the plt value for pe symbols. +// SymPlt returns the PLT offset of symbol s. func (l *Loader) SymPlt(s Sym) int32 { if v, ok := l.plt[s]; ok { return v @@ -1435,7 +1454,7 @@ func (l *Loader) SymPlt(s Sym) int32 { return -1 } -// SetPlt sets the plt value for pe symbols. +// SetPlt sets the PLT offset of symbol i. func (l *Loader) SetPlt(i Sym, v int32) { if i >= Sym(len(l.objSyms)) || i == 0 { panic("bad symbol for SetPlt") @@ -1447,7 +1466,7 @@ func (l *Loader) SetPlt(i Sym, v int32) { } } -// SymGot returns the got value for pe symbols. +// SymGot returns the GOT offset of symbol s. func (l *Loader) SymGot(s Sym) int32 { if v, ok := l.got[s]; ok { return v @@ -1455,7 +1474,7 @@ func (l *Loader) SymGot(s Sym) int32 { return -1 } -// SetGot sets the got value for pe symbols. +// SetGot sets the GOT offset of symbol i. func (l *Loader) SetGot(i Sym, v int32) { if i >= Sym(len(l.objSyms)) || i == 0 { panic("bad symbol for SetGot") @@ -1547,7 +1566,7 @@ func (l *Loader) SymUnit(i Sym) *sym.CompilationUnit { // regular compiler-generated Go symbols), but in the case of // building with "-linkshared" (when a symbol is read from a // shared library), will hold the library name. -// NOTE: this correspondes to sym.Symbol.File field. +// NOTE: this corresponds to sym.Symbol.File field. func (l *Loader) SymPkg(i Sym) string { if f, ok := l.symPkg[i]; ok { return f @@ -1833,10 +1852,9 @@ func (relocs *Relocs) Count() int { return len(relocs.rs) } // At returns the j-th reloc for a global symbol. 
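The CgoExports table above closes a gap in host-object loading: host objects carry no Go ABI information, so a version-0 reference to a cgo-exported Go function must resolve to whichever ABI version Go actually exported. A sketch of that fallback under simplified assumptions — loaderStub and Sym are hypothetical stand-ins for the real Loader and loader.Sym:

package main

import "fmt"

// Sym stands in for loader.Sym in this sketch.
type Sym = int

type loaderStub struct {
	cgoExports map[string]Sym         // name -> cgo-exported Go symbol (any ABI)
	byNameVer  map[string]map[int]Sym // (name, version) -> symbol
	next       Sym
}

// lookupOrCreate stands in for Loader.LookupOrCreateSym.
func (l *loaderStub) lookupOrCreate(name string, ver int) Sym {
	m := l.byNameVer[name]
	if m == nil {
		m = make(map[int]Sym)
		l.byNameVer[name] = m
	}
	if s, ok := m[ver]; ok {
		return s
	}
	l.next++
	m[ver] = l.next
	return l.next
}

// lookupOrCreateCgoExport mirrors the fallback in the patch: static
// versions go straight to the ordinary lookup, while global (version 0)
// references try the cgo-export table first.
func (l *loaderStub) lookupOrCreateCgoExport(name string, ver int) Sym {
	if ver != 0 {
		return l.lookupOrCreate(name, ver)
	}
	if s, ok := l.cgoExports[name]; ok {
		return s
	}
	return l.lookupOrCreate(name, 0)
}

func main() {
	l := &loaderStub{
		cgoExports: map[string]Sym{"GoAdd": 42},
		byNameVer:  map[string]map[int]Sym{},
		next:       100,
	}
	fmt.Println(l.lookupOrCreateCgoExport("GoAdd", 0))    // 42: the exported Go symbol
	fmt.Println(l.lookupOrCreateCgoExport("hostOnly", 0)) // 101: fresh version-0 symbol
}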
func (relocs *Relocs) At(j int) Reloc { if relocs.l.isExtReader(relocs.r) { - pp := relocs.l.payloads[relocs.li] - return Reloc{&relocs.rs[j], relocs.r, relocs.l, pp.reltypes[j]} + return Reloc{&relocs.rs[j], relocs.r, relocs.l} } - return Reloc{&relocs.rs[j], relocs.r, relocs.l, 0} + return Reloc{&relocs.rs[j], relocs.r, relocs.l} } // Relocs returns a Relocs object for the given global sym. @@ -2226,7 +2244,7 @@ func loadObjRefs(l *Loader, r *oReader, arch *sys.Arch) { pkg := r.Pkg(i) objidx, ok := l.objByPkg[pkg] if !ok { - log.Fatalf("reference of nonexisted package %s, from %v", pkg, r.unit.Lib) + log.Fatalf("%v: reference to nonexistent package %s", r.unit.Lib, pkg) } r.pkg[i] = objidx } @@ -2337,13 +2355,11 @@ func (l *Loader) cloneToExternal(symIdx Sym) { // Copy relocations relocs := l.Relocs(symIdx) pp.relocs = make([]goobj.Reloc, relocs.Count()) - pp.reltypes = make([]objabi.RelocType, relocs.Count()) for i := range pp.relocs { // Copy the relocs slice. // Convert local reference to global reference. rel := relocs.At(i) - pp.relocs[i].Set(rel.Off(), rel.Siz(), 0, rel.Add(), goobj.SymRef{PkgIdx: 0, SymIdx: uint32(rel.Sym())}) - pp.reltypes[i] = rel.Type() + pp.relocs[i].Set(rel.Off(), rel.Siz(), uint16(rel.Type()), rel.Add(), goobj.SymRef{PkgIdx: 0, SymIdx: uint32(rel.Sym())}) } // Copy data @@ -2558,7 +2574,7 @@ func (l *Loader) AssignTextSymbolOrder(libs []*sym.Library, intlibs []bool, exts for i, list := range lists { for _, s := range list { sym := Sym(s) - if l.attrReachable.Has(sym) && !assignedToUnit.Has(sym) { + if !assignedToUnit.Has(sym) { textp = append(textp, sym) unit := l.SymUnit(sym) if unit != nil { diff --git a/src/cmd/link/internal/loader/loader_test.go b/src/cmd/link/internal/loader/loader_test.go index 1371c2a5410..15ae830dc94 100644 --- a/src/cmd/link/internal/loader/loader_test.go +++ b/src/cmd/link/internal/loader/loader_test.go @@ -237,7 +237,8 @@ func sameRelocSlice(s1 *Relocs, s2 []Reloc) bool { type addFunc func(l *Loader, s Sym, s2 Sym) Sym func mkReloc(l *Loader, typ objabi.RelocType, off int32, siz uint8, add int64, sym Sym) Reloc { - r := Reloc{&goobj.Reloc{}, l.extReader, l, typ} + r := Reloc{&goobj.Reloc{}, l.extReader, l} + r.SetType(typ) r.SetOff(off) r.SetSiz(siz) r.SetAdd(add) diff --git a/src/cmd/link/internal/loader/symbolbuilder.go b/src/cmd/link/internal/loader/symbolbuilder.go index 5d37da8ac6f..204d04412dc 100644 --- a/src/cmd/link/internal/loader/symbolbuilder.go +++ b/src/cmd/link/internal/loader/symbolbuilder.go @@ -121,13 +121,11 @@ func (sb *SymbolBuilder) Relocs() Relocs { // ResetRelocs removes all relocations on this symbol. func (sb *SymbolBuilder) ResetRelocs() { sb.relocs = sb.relocs[:0] - sb.reltypes = sb.reltypes[:0] } // SetRelocType sets the type of the 'i'-th relocation on this sym to 't' func (sb *SymbolBuilder) SetRelocType(i int, t objabi.RelocType) { - sb.relocs[i].SetType(0) - sb.reltypes[i] = t + sb.relocs[i].SetType(uint16(t)) } // SetRelocSym sets the target sym of the 'i'-th relocation on this sym to 's' @@ -143,7 +141,6 @@ func (sb *SymbolBuilder) SetRelocAdd(i int, a int64) { // Add n relocations, return a handle to the relocations. func (sb *SymbolBuilder) AddRelocs(n int) Relocs { sb.relocs = append(sb.relocs, make([]goobj.Reloc, n)...) - sb.reltypes = append(sb.reltypes, make([]objabi.RelocType, n)...) 
return sb.l.Relocs(sb.symIdx) } @@ -152,7 +149,7 @@ func (sb *SymbolBuilder) AddRelocs(n int) Relocs { func (sb *SymbolBuilder) AddRel(typ objabi.RelocType) (Reloc, int) { j := len(sb.relocs) sb.relocs = append(sb.relocs, goobj.Reloc{}) - sb.reltypes = append(sb.reltypes, typ) + sb.relocs[j].SetType(uint16(typ)) relocs := sb.Relocs() return relocs.At(j), j } @@ -169,7 +166,6 @@ func (p *relocsByOff) Len() int { return len(p.relocs) } func (p *relocsByOff) Less(i, j int) bool { return p.relocs[i].Off() < p.relocs[j].Off() } func (p *relocsByOff) Swap(i, j int) { p.relocs[i], p.relocs[j] = p.relocs[j], p.relocs[i] - p.reltypes[i], p.reltypes[j] = p.reltypes[j], p.reltypes[i] } func (sb *SymbolBuilder) Reachable() bool { diff --git a/src/cmd/link/internal/loadmacho/ldmacho.go b/src/cmd/link/internal/loadmacho/ldmacho.go index 6d1d9bb29ed..e7d9eebc33f 100644 --- a/src/cmd/link/internal/loadmacho/ldmacho.go +++ b/src/cmd/link/internal/loadmacho/ldmacho.go @@ -607,7 +607,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, if machsym.type_&N_EXT == 0 { v = localSymVersion } - s := l.LookupOrCreateSym(name, v) + s := l.LookupOrCreateCgoExport(name, v) if machsym.type_&N_EXT == 0 { l.SetAttrDuplicateOK(s, true) } diff --git a/src/cmd/link/internal/loadpe/ldpe.go b/src/cmd/link/internal/loadpe/ldpe.go index a5c025de8ff..9cc7effe1f4 100644 --- a/src/cmd/link/internal/loadpe/ldpe.go +++ b/src/cmd/link/internal/loadpe/ldpe.go @@ -115,6 +115,24 @@ const ( IMAGE_REL_THUMB_BRANCH24 = 0x0014 IMAGE_REL_THUMB_BLX23 = 0x0015 IMAGE_REL_ARM_PAIR = 0x0016 + IMAGE_REL_ARM64_ABSOLUTE = 0x0000 + IMAGE_REL_ARM64_ADDR32 = 0x0001 + IMAGE_REL_ARM64_ADDR32NB = 0x0002 + IMAGE_REL_ARM64_BRANCH26 = 0x0003 + IMAGE_REL_ARM64_PAGEBASE_REL21 = 0x0004 + IMAGE_REL_ARM64_REL21 = 0x0005 + IMAGE_REL_ARM64_PAGEOFFSET_12A = 0x0006 + IMAGE_REL_ARM64_PAGEOFFSET_12L = 0x0007 + IMAGE_REL_ARM64_SECREL = 0x0008 + IMAGE_REL_ARM64_SECREL_LOW12A = 0x0009 + IMAGE_REL_ARM64_SECREL_HIGH12A = 0x000A + IMAGE_REL_ARM64_SECREL_LOW12L = 0x000B + IMAGE_REL_ARM64_TOKEN = 0x000C + IMAGE_REL_ARM64_SECTION = 0x000D + IMAGE_REL_ARM64_ADDR64 = 0x000E + IMAGE_REL_ARM64_BRANCH19 = 0x000F + IMAGE_REL_ARM64_BRANCH14 = 0x0010 + IMAGE_REL_ARM64_REL32 = 0x0011 ) // TODO(crawshaw): de-duplicate these symbols with cmd/internal/ld, ideally in debug/pe. @@ -160,11 +178,7 @@ func makeUpdater(l *loader.Loader, bld *loader.SymbolBuilder, s loader.Sym) *loa // If an .rsrc section or set of .rsrc$xx sections is found, its symbols are // returned as rsrc. 
func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Reader, pkg string, length int64, pn string) (textp []loader.Sym, rsrc []loader.Sym, err error) { - lookup := func(name string, version int) (*loader.SymbolBuilder, loader.Sym) { - s := l.LookupOrCreateSym(name, version) - sb := l.MakeSymbolUpdater(s) - return sb, s - } + lookup := l.LookupOrCreateCgoExport sectsyms := make(map[*pe.Section]loader.Sym) sectdata := make(map[*pe.Section][]byte) @@ -196,7 +210,8 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read } name := fmt.Sprintf("%s(%s)", pkg, sect.Name) - bld, s := lookup(name, localSymVersion) + s := lookup(name, localSymVersion) + bld := l.MakeSymbolUpdater(s) switch sect.Characteristics & (IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE) { case IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ: //.rdata @@ -254,7 +269,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read return nil, nil, fmt.Errorf("relocation number %d symbol index idx=%d cannot be large then number of symbols %d", j, r.SymbolTableIndex, len(f.COFFSymbols)) } pesym := &f.COFFSymbols[r.SymbolTableIndex] - _, gosym, err := readpesym(l, arch, l.LookupOrCreateSym, f, pesym, sectsyms, localSymVersion) + _, gosym, err := readpesym(l, arch, lookup, f, pesym, sectsyms, localSymVersion) if err != nil { return nil, nil, err } @@ -319,6 +334,17 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read case IMAGE_REL_ARM_BRANCH24: rType = objabi.R_CALLARM + rAdd = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rOff:]))) + } + + case sys.ARM64: + switch r.Type { + default: + return nil, nil, fmt.Errorf("%s: %v: unknown ARM64 relocation type %v", pn, sectsyms[rsect], r.Type) + + case IMAGE_REL_ARM64_ADDR32, IMAGE_REL_ARM64_ADDR32NB: + rType = objabi.R_ADDR + rAdd = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rOff:]))) } } @@ -385,7 +411,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read } } - bld, s, err := readpesym(l, arch, l.LookupOrCreateSym, f, pesym, sectsyms, localSymVersion) + bld, s, err := readpesym(l, arch, lookup, f, pesym, sectsyms, localSymVersion) if err != nil { return nil, nil, err } diff --git a/src/cmd/link/internal/loadxcoff/ldxcoff.go b/src/cmd/link/internal/loadxcoff/ldxcoff.go index a5744216d66..920e1c85fd4 100644 --- a/src/cmd/link/internal/loadxcoff/ldxcoff.go +++ b/src/cmd/link/internal/loadxcoff/ldxcoff.go @@ -121,7 +121,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read } sb := l.MakeSymbolUpdater(sect.sym) for _, rx := range sect.Relocs { - rSym := l.LookupOrCreateSym(rx.Symbol.Name, 0) + rSym := l.LookupOrCreateCgoExport(rx.Symbol.Name, 0) if uint64(int32(rx.VirtualAddress)) != rx.VirtualAddress { return errorf("virtual address of a relocation is too big: 0x%x", rx.VirtualAddress) } diff --git a/src/cmd/link/internal/mips/asm.go b/src/cmd/link/internal/mips/asm.go index 17b1b20aff3..8505dc61093 100644 --- a/src/cmd/link/internal/mips/asm.go +++ b/src/cmd/link/internal/mips/asm.go @@ -140,7 +140,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade return val, 0, false } -func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64) int64 { +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, 
loader.Sym, int64, []byte) int64 { return -1 } diff --git a/src/cmd/link/internal/mips/obj.go b/src/cmd/link/internal/mips/obj.go index f20597c0f51..5ca75825293 100644 --- a/src/cmd/link/internal/mips/obj.go +++ b/src/cmd/link/internal/mips/obj.go @@ -34,11 +34,12 @@ import ( "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/ld" + "internal/buildcfg" ) func Init() (*sys.Arch, ld.Arch) { arch := sys.ArchMIPS - if objabi.GOARCH == "mipsle" { + if buildcfg.GOARCH == "mipsle" { arch = sys.ArchMIPSLE } diff --git a/src/cmd/link/internal/mips64/asm.go b/src/cmd/link/internal/mips64/asm.go index 4789b411eb5..f7f91d1e8b2 100644 --- a/src/cmd/link/internal/mips64/asm.go +++ b/src/cmd/link/internal/mips64/asm.go @@ -52,6 +52,8 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, // type uint8 // addend int64 + addend := r.Xadd + out.Write64(uint64(sectoff)) elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) @@ -77,11 +79,17 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, out.Write8(uint8(elf.R_MIPS_HI16)) case objabi.R_ADDRMIPSTLS: out.Write8(uint8(elf.R_MIPS_TLS_TPREL_LO16)) + if ctxt.Target.IsOpenbsd() { + // OpenBSD mips64 does not currently offset TLS by 0x7000, + // as such we need to add this back to get the correct offset + // via the external linker. + addend += 0x7000 + } case objabi.R_CALLMIPS, objabi.R_JMPMIPS: out.Write8(uint8(elf.R_MIPS_26)) } - out.Write64(uint64(r.Xadd)) + out.Write64(uint64(addend)) return true } @@ -124,6 +132,11 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade case objabi.R_ADDRMIPSTLS: // thread pointer is at 0x7000 offset from the start of TLS data area t := ldr.SymValue(rs) + r.Add() - 0x7000 + if target.IsOpenbsd() { + // OpenBSD mips64 does not currently offset TLS by 0x7000, + // as such we need to add this back to get the correct offset. + t += 0x7000 + } if t < -32768 || t >= 32678 { ldr.Errorf(s, "TLS offset out of range %d", t) } @@ -138,7 +151,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade return val, 0, false } -func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64) int64 { +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { return -1 } diff --git a/src/cmd/link/internal/mips64/obj.go b/src/cmd/link/internal/mips64/obj.go index 01d89a209ca..544e1ef7bed 100644 --- a/src/cmd/link/internal/mips64/obj.go +++ b/src/cmd/link/internal/mips64/obj.go @@ -34,11 +34,12 @@ import ( "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/ld" + "internal/buildcfg" ) func Init() (*sys.Arch, ld.Arch) { arch := sys.ArchMIPS64 - if objabi.GOARCH == "mips64le" { + if buildcfg.GOARCH == "mips64le" { arch = sys.ArchMIPS64LE } diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go index 602f0b52999..b877864b759 100644 --- a/src/cmd/link/internal/ppc64/asm.go +++ b/src/cmd/link/internal/ppc64/asm.go @@ -121,16 +121,21 @@ func genplt(ctxt *ld.Link, ldr *loader.Loader) { // Update the relocation to use the call stub r.SetSym(stub.Sym()) - // make sure the data is writeable - if ldr.AttrReadOnly(s) { - panic("can't write to read-only sym data") - } + // Make the symbol writeable so we can fixup toc. + su := ldr.MakeSymbolUpdater(s) + su.MakeWritable() + p := su.Data() - // Restore TOC after bl. The compiler put a - // nop here for us to overwrite. 
- sp := ldr.Data(s) + // Check for toc restore slot (a nop), and replace with toc restore. + var nop uint32 + if len(p) >= int(r.Off()+8) { + nop = ctxt.Arch.ByteOrder.Uint32(p[r.Off()+4:]) + } + if nop != 0x60000000 { + ldr.Errorf(s, "Symbol %s is missing toc restoration slot at offset %d", ldr.SymName(s), r.Off()+4) + } const o1 = 0xe8410018 // ld r2,24(r1) - ctxt.Arch.ByteOrder.PutUint32(sp[r.Off()+4:], o1) + ctxt.Arch.ByteOrder.PutUint32(p[r.Off()+4:], o1) } } // Put call stubs at the beginning (instead of the end). @@ -413,6 +418,7 @@ func xcoffreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy emitReloc(ld.XCOFF_R_TOCU|(0x0F<<8), 2) emitReloc(ld.XCOFF_R_TOCL|(0x0F<<8), 6) case objabi.R_POWER_TLS_LE: + // This only supports 16b relocations. It is fixed up in archreloc. emitReloc(ld.XCOFF_R_TLS_LE|0x0F<<8, 2) case objabi.R_CALLPOWER: if r.Size != 4 { @@ -453,7 +459,10 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, case objabi.R_POWER_TLS: out.Write64(uint64(elf.R_PPC64_TLS) | uint64(elfsym)<<32) case objabi.R_POWER_TLS_LE: - out.Write64(uint64(elf.R_PPC64_TPREL16) | uint64(elfsym)<<32) + out.Write64(uint64(elf.R_PPC64_TPREL16_HA) | uint64(elfsym)<<32) + out.Write64(uint64(r.Xadd)) + out.Write64(uint64(sectoff + 4)) + out.Write64(uint64(elf.R_PPC64_TPREL16_LO) | uint64(elfsym)<<32) case objabi.R_POWER_TLS_IE: out.Write64(uint64(elf.R_PPC64_GOT_TPREL16_HA) | uint64(elfsym)<<32) out.Write64(uint64(r.Xadd)) @@ -642,6 +651,16 @@ func archrelocaddr(ldr *loader.Loader, target *ld.Target, syms *ld.ArchSyms, r l return int64(o2)<<32 | int64(o1) } +// Determine if the code was compiled so that the TOC register R2 is initialized and maintained +func r2Valid(ctxt *ld.Link) bool { + switch ctxt.BuildMode { + case ld.BuildModeCArchive, ld.BuildModeCShared, ld.BuildModePIE, ld.BuildModeShared, ld.BuildModePlugin: + return true + } + // -linkshared option + return ctxt.IsSharedGoLink() +} + // resolve direct jump relocation r in s, and add trampoline if necessary func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { @@ -649,7 +668,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { // For internal linking, trampolines are always created for long calls. // For external linking, the linker can insert a call stub to handle a long call, but depends on having the TOC address in // r2. For those build modes with external linking where the TOC address is not maintained in r2, trampolines must be created. - if ctxt.IsExternal() && (ctxt.DynlinkingGo() || ctxt.BuildMode == ld.BuildModeCArchive || ctxt.BuildMode == ld.BuildModeCShared || ctxt.BuildMode == ld.BuildModePIE) { + if ctxt.IsExternal() && r2Valid(ctxt) { // No trampolines needed since r2 contains the TOC return } @@ -703,7 +722,7 @@ func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) { } } if ldr.SymType(tramp) == 0 { - if ctxt.DynlinkingGo() || ctxt.BuildMode == ld.BuildModeCArchive || ctxt.BuildMode == ld.BuildModeCShared || ctxt.BuildMode == ld.BuildModePIE { + if r2Valid(ctxt) { // Should have returned for above cases ctxt.Errorf(s, "unexpected trampoline for shared or dynamic linking") } else { @@ -792,11 +811,25 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade if !target.IsAIX() { return val, nExtReloc, false } - case objabi.R_POWER_TLS, objabi.R_POWER_TLS_LE, objabi.R_POWER_TLS_IE: - // check Outer is nil, Type is TLSBSS? 
+ case objabi.R_POWER_TLS: nExtReloc = 1 - if rt == objabi.R_POWER_TLS_IE { - nExtReloc = 2 // need two ELF relocations, see elfreloc1 + return val, nExtReloc, true + case objabi.R_POWER_TLS_LE, objabi.R_POWER_TLS_IE: + if target.IsAIX() && rt == objabi.R_POWER_TLS_LE { + // Fixup val, an addis/addi pair of instructions, which generate a 32b displacement + // from the threadpointer (R13), into a 16b relocation. XCOFF only supports 16b + // TLS LE relocations. Likewise, verify this is an addis/addi sequence. + const expectedOpcodes = 0x3C00000038000000 + const expectedOpmasks = 0xFC000000FC000000 + if uint64(val)&expectedOpmasks != expectedOpcodes { + ldr.Errorf(s, "relocation for %s+%d is not an addis/addi pair: %16x", ldr.SymName(rs), r.Off(), uint64(val)) + } + nval := (int64(uint32(0x380d0000)) | val&0x03e00000) << 32 // addi rX, r13, $0 + nval |= int64(0x60000000) // nop + val = nval + nExtReloc = 1 + } else { + nExtReloc = 2 } return val, nExtReloc, true case objabi.R_ADDRPOWER, @@ -850,16 +883,32 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade // the TLS. v -= 0x800 } - if int64(int16(v)) != v { + + var o1, o2 uint32 + if int64(int32(v)) != v { ldr.Errorf(s, "TLS offset out of range %d", v) } - return (val &^ 0xffff) | (v & 0xffff), nExtReloc, true + if target.IsBigEndian() { + o1 = uint32(val >> 32) + o2 = uint32(val) + } else { + o1 = uint32(val) + o2 = uint32(val >> 32) + } + + o1 |= uint32(((v + 0x8000) >> 16) & 0xFFFF) + o2 |= uint32(v & 0xFFFF) + + if target.IsBigEndian() { + return int64(o1)<<32 | int64(o2), nExtReloc, true + } + return int64(o2)<<32 | int64(o1), nExtReloc, true } return val, nExtReloc, false } -func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv sym.RelocVariant, s loader.Sym, t int64) (relocatedOffset int64) { +func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv sym.RelocVariant, s loader.Sym, t int64, p []byte) (relocatedOffset int64) { rs := ldr.ResolveABIAlias(r.Sym()) switch rv & sym.RV_TYPE_MASK { default: @@ -875,9 +924,10 @@ func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv // overflow depends on the instruction var o1 uint32 if target.IsBigEndian() { - o1 = binary.BigEndian.Uint32(ldr.Data(s)[r.Off()-2:]) + o1 = binary.BigEndian.Uint32(p[r.Off()-2:]) + } else { - o1 = binary.LittleEndian.Uint32(ldr.Data(s)[r.Off():]) + o1 = binary.LittleEndian.Uint32(p[r.Off():]) } switch o1 >> 26 { case 24, // ori @@ -909,9 +959,9 @@ func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv // overflow depends on the instruction var o1 uint32 if target.IsBigEndian() { - o1 = binary.BigEndian.Uint32(ldr.Data(s)[r.Off()-2:]) + o1 = binary.BigEndian.Uint32(p[r.Off()-2:]) } else { - o1 = binary.LittleEndian.Uint32(ldr.Data(s)[r.Off():]) + o1 = binary.LittleEndian.Uint32(p[r.Off():]) } switch o1 >> 26 { case 25, // oris @@ -933,9 +983,9 @@ func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv case sym.RV_POWER_DS: var o1 uint32 if target.IsBigEndian() { - o1 = uint32(binary.BigEndian.Uint16(ldr.Data(s)[r.Off():])) + o1 = uint32(binary.BigEndian.Uint16(p[r.Off():])) } else { - o1 = uint32(binary.LittleEndian.Uint16(ldr.Data(s)[r.Off():])) + o1 = uint32(binary.LittleEndian.Uint16(p[r.Off():])) } if t&3 != 0 { ldr.Errorf(s, "relocation for %s+%d is not aligned: %d", ldr.SymName(rs), r.Off(), t) @@ -1039,8 +1089,11 @@ func ensureglinkresolver(ctxt *ld.Link, ldr *loader.Loader) *loader.SymbolBuilde 
glink.AddUint32(ctxt.Arch, 0x7800f082) // srdi r0,r0,2 // r11 = address of the first byte of the PLT - glink.AddSymRef(ctxt.Arch, ctxt.PLT, 0, objabi.R_ADDRPOWER, 8) - + r, _ := glink.AddRel(objabi.R_ADDRPOWER) + r.SetSym(ctxt.PLT) + r.SetSiz(8) + r.SetOff(int32(glink.Size())) + r.SetAdd(0) glink.AddUint32(ctxt.Arch, 0x3d600000) // addis r11,0,.plt@ha glink.AddUint32(ctxt.Arch, 0x396b0000) // addi r11,r11,.plt@l diff --git a/src/cmd/link/internal/ppc64/obj.go b/src/cmd/link/internal/ppc64/obj.go index ef4393f4893..b6d5ad92afa 100644 --- a/src/cmd/link/internal/ppc64/obj.go +++ b/src/cmd/link/internal/ppc64/obj.go @@ -34,21 +34,22 @@ import ( "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/ld" + "internal/buildcfg" ) func Init() (*sys.Arch, ld.Arch) { arch := sys.ArchPPC64 - if objabi.GOARCH == "ppc64le" { + if buildcfg.GOARCH == "ppc64le" { arch = sys.ArchPPC64LE } theArch := ld.Arch{ - Funcalign: funcAlign, - Maxalign: maxAlign, - Minalign: minAlign, - Dwarfregsp: dwarfRegSP, - Dwarfreglr: dwarfRegLR, - WriteTextBlocks: true, + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + TrampLimit: 0x1c00000, Adddynrel: adddynrel, Archinit: archinit, diff --git a/src/cmd/link/internal/riscv64/asm.go b/src/cmd/link/internal/riscv64/asm.go index c18e0540d8c..6eace617dc0 100644 --- a/src/cmd/link/internal/riscv64/asm.go +++ b/src/cmd/link/internal/riscv64/asm.go @@ -230,7 +230,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade return val, 0, false } -func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64) int64 { +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { log.Fatalf("archrelocvariant") return -1 } diff --git a/src/cmd/link/internal/s390x/asm.go b/src/cmd/link/internal/s390x/asm.go index 78d2cc81e44..1952971dcb9 100644 --- a/src/cmd/link/internal/s390x/asm.go +++ b/src/cmd/link/internal/s390x/asm.go @@ -371,7 +371,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade return val, 0, false } -func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv sym.RelocVariant, s loader.Sym, t int64) int64 { +func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv sym.RelocVariant, s loader.Sym, t int64, p []byte) int64 { switch rv & sym.RV_TYPE_MASK { default: ldr.Errorf(s, "unexpected relocation variant %d", rv) diff --git a/src/cmd/link/internal/sym/compilation_unit.go b/src/cmd/link/internal/sym/compilation_unit.go index 5d7206db66e..3bad5bf3f4f 100644 --- a/src/cmd/link/internal/sym/compilation_unit.go +++ b/src/cmd/link/internal/sym/compilation_unit.go @@ -20,7 +20,6 @@ type LoaderSym int // // These are used for both DWARF and pclntab generation. 
type CompilationUnit struct { - Pkg string // The package name, eg ("fmt", or "runtime") Lib *Library // Our library PclnIndex int // Index of this CU in pclntab PCs []dwarf.Range // PC ranges, relative to Textp[0] @@ -29,6 +28,7 @@ type CompilationUnit struct { Consts LoaderSym // Package constants DIEs FuncDIEs []LoaderSym // Function DIE subtrees + VarDIEs []LoaderSym // Global variable DIEs AbsFnDIEs []LoaderSym // Abstract function DIE subtrees RangeSyms []LoaderSym // Symbols for debug_range Textp []LoaderSym // Text symbols in this CU diff --git a/src/cmd/link/internal/wasm/asm.go b/src/cmd/link/internal/wasm/asm.go index 31851fbb567..5bdfdbaee63 100644 --- a/src/cmd/link/internal/wasm/asm.go +++ b/src/cmd/link/internal/wasm/asm.go @@ -10,6 +10,7 @@ import ( "cmd/link/internal/ld" "cmd/link/internal/loader" "cmd/link/internal/sym" + "internal/buildcfg" "io" "regexp" ) @@ -506,15 +507,15 @@ func writeProducerSec(ctxt *ld.Link) { writeUleb128(ctxt.Out, 2) // number of fields - writeName(ctxt.Out, "language") // field name - writeUleb128(ctxt.Out, 1) // number of values - writeName(ctxt.Out, "Go") // value: name - writeName(ctxt.Out, objabi.Version) // value: version + writeName(ctxt.Out, "language") // field name + writeUleb128(ctxt.Out, 1) // number of values + writeName(ctxt.Out, "Go") // value: name + writeName(ctxt.Out, buildcfg.Version) // value: version writeName(ctxt.Out, "processed-by") // field name writeUleb128(ctxt.Out, 1) // number of values writeName(ctxt.Out, "Go cmd/compile") // value: name - writeName(ctxt.Out, objabi.Version) // value: version + writeName(ctxt.Out, buildcfg.Version) // value: version writeSecSize(ctxt, sizeOffset) } diff --git a/src/cmd/link/internal/x86/asm.go b/src/cmd/link/internal/x86/asm.go index af0ce11255f..5f6bcfb8b1a 100644 --- a/src/cmd/link/internal/x86/asm.go +++ b/src/cmd/link/internal/x86/asm.go @@ -415,7 +415,7 @@ func archreloc(*ld.Target, *loader.Loader, *ld.ArchSyms, loader.Reloc, loader.Sy return -1, 0, false } -func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64) int64 { +func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { log.Fatalf("unexpected relocation variant") return -1 } diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 08ddd00a0c7..8805ff1f02c 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -48,13 +48,9 @@ const X = "\n!\n" func main() {} ` - tmpdir, err := ioutil.TempDir("", "issue21703") - if err != nil { - t.Fatalf("failed to create temp dir: %v\n", err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(tmpdir, "main.go"), []byte(source), 0666) + err := ioutil.WriteFile(filepath.Join(tmpdir, "main.go"), []byte(source), 0666) if err != nil { t.Fatalf("failed to write main.go: %v\n", err) } @@ -83,11 +79,7 @@ func TestIssue28429(t *testing.T) { testenv.MustHaveGoBuild(t) - tmpdir, err := ioutil.TempDir("", "issue28429-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() write := func(name, content string) { err := ioutil.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666) @@ -126,11 +118,7 @@ func TestUnresolved(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "unresolved-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() write := func(name, 
content string) { err := ioutil.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666) @@ -189,17 +177,14 @@ func TestIssue33979(t *testing.T) { case "mips", "mipsle", "mips64", "mips64le": t.Skipf("Skipping on %s/%s", runtime.GOOS, runtime.GOARCH) } - if runtime.GOOS == "aix" { + if runtime.GOOS == "aix" || + runtime.GOOS == "windows" && runtime.GOARCH == "arm64" { t.Skipf("Skipping on %s/%s", runtime.GOOS, runtime.GOARCH) } t.Parallel() - tmpdir, err := ioutil.TempDir("", "unresolved-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() write := func(name, content string) { err := ioutil.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666) @@ -300,11 +285,7 @@ func TestBuildForTvOS(t *testing.T) { "-framework", "CoreFoundation", } lib := filepath.Join("testdata", "testBuildFortvOS", "lib.go") - tmpDir, err := ioutil.TempDir("", "go-link-TestBuildFortvOS") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() ar := filepath.Join(tmpDir, "lib.a") cmd := exec.Command(testenv.GoToolPath(t), "build", "-buildmode=c-archive", "-o", ar, lib) @@ -339,14 +320,10 @@ func TestXFlag(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestXFlag") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "main.go") - err = ioutil.WriteFile(src, []byte(testXFlagSrc), 0666) + err := ioutil.WriteFile(src, []byte(testXFlagSrc), 0666) if err != nil { t.Fatal(err) } @@ -357,24 +334,20 @@ func TestXFlag(t *testing.T) { } } -var testMacOSVersionSrc = ` +var testMachOBuildVersionSrc = ` package main func main() { } ` -func TestMacOSVersion(t *testing.T) { +func TestMachOBuildVersion(t *testing.T) { testenv.MustHaveGoBuild(t) t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestMacOSVersion") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "main.go") - err = ioutil.WriteFile(src, []byte(testMacOSVersionSrc), 0666) + err := ioutil.WriteFile(src, []byte(testMachOBuildVersionSrc), 0666) if err != nil { t.Fatal(err) } @@ -393,33 +366,34 @@ func TestMacOSVersion(t *testing.T) { if err != nil { t.Fatal(err) } + defer exef.Close() exem, err := macho.NewFile(exef) if err != nil { t.Fatal(err) } found := false - const LC_VERSION_MIN_MACOSX = 0x24 + const LC_BUILD_VERSION = 0x32 checkMin := func(ver uint32) { major, minor := (ver>>16)&0xff, (ver>>8)&0xff if major != 10 || minor < 9 { - t.Errorf("LC_VERSION_MIN_MACOSX version %d.%d < 10.9", major, minor) + t.Errorf("LC_BUILD_VERSION version %d.%d < 10.9", major, minor) } } for _, cmd := range exem.Loads { raw := cmd.Raw() type_ := exem.ByteOrder.Uint32(raw) - if type_ != LC_VERSION_MIN_MACOSX { + if type_ != LC_BUILD_VERSION { continue } - osVer := exem.ByteOrder.Uint32(raw[8:]) + osVer := exem.ByteOrder.Uint32(raw[12:]) checkMin(osVer) - sdkVer := exem.ByteOrder.Uint32(raw[12:]) + sdkVer := exem.ByteOrder.Uint32(raw[16:]) checkMin(sdkVer) found = true break } if !found { - t.Errorf("no LC_VERSION_MIN_MACOSX load command found") + t.Errorf("no LC_BUILD_VERSION load command found") } } @@ -446,14 +420,10 @@ func TestIssue34788Android386TLSSequence(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestIssue34788Android386TLSSequence") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "blah.go") - err = ioutil.WriteFile(src, 
[]byte(Issue34788src), 0666) + err := ioutil.WriteFile(src, []byte(Issue34788src), 0666) if err != nil { t.Fatal(err) } @@ -506,14 +476,10 @@ func TestStrictDup(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestStrictDup") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "x.go") - err = ioutil.WriteFile(src, []byte(testStrictDupGoSrc), 0666) + err := ioutil.WriteFile(src, []byte(testStrictDupGoSrc), 0666) if err != nil { t.Fatal(err) } @@ -592,14 +558,10 @@ func TestFuncAlign(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestFuncAlign") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "go.mod") - err = ioutil.WriteFile(src, []byte("module cmd/link/TestFuncAlign/falign"), 0666) + err := ioutil.WriteFile(src, []byte("module cmd/link/TestFuncAlign/falign"), 0666) if err != nil { t.Fatal(err) } @@ -656,7 +618,7 @@ func TestTrampoline(t *testing.T) { // threshold for trampoline generation, and essentially all cross-package // calls will use trampolines. switch runtime.GOARCH { - case "arm", "ppc64", "ppc64le": + case "arm", "arm64", "ppc64", "ppc64le": default: t.Skipf("trampoline insertion is not implemented on %s", runtime.GOARCH) } @@ -665,14 +627,10 @@ func TestTrampoline(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestTrampoline") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "hello.go") - err = ioutil.WriteFile(src, []byte(testTrampSrc), 0666) + err := ioutil.WriteFile(src, []byte(testTrampSrc), 0666) if err != nil { t.Fatal(err) } @@ -693,6 +651,77 @@ func TestTrampoline(t *testing.T) { } } +const testTrampCgoSrc = ` +package main + +// #include +// void CHello() { printf("hello\n"); fflush(stdout); } +import "C" + +func main() { + C.CHello() +} +` + +func TestTrampolineCgo(t *testing.T) { + // Test that trampoline insertion works for cgo code. + // For stress test, we set -debugtramp=2 flag, which sets a very low + // threshold for trampoline generation, and essentially all cross-package + // calls will use trampolines. + switch runtime.GOARCH { + case "arm", "arm64", "ppc64", "ppc64le": + default: + t.Skipf("trampoline insertion is not implemented on %s", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "hello.go") + err := ioutil.WriteFile(src, []byte(testTrampCgoSrc), 0666) + if err != nil { + t.Fatal(err) + } + exe := filepath.Join(tmpdir, "hello.exe") + + cmd := exec.Command(testenv.GoToolPath(t), "build", "-ldflags=-debugtramp=2", "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v\n%s", err, out) + } + cmd = exec.Command(exe) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("executable failed to run: %v\n%s", err, out) + } + if string(out) != "hello\n" { + t.Errorf("unexpected output:\n%s", out) + } + + // Test internal linking mode. 
+ + if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || (runtime.GOARCH == "arm64" && runtime.GOOS == "windows") || !testenv.CanInternalLink() { + return // internal linking cgo is not supported + } + cmd = exec.Command(testenv.GoToolPath(t), "build", "-ldflags=-debugtramp=2 -linkmode=internal", "-o", exe, src) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v\n%s", err, out) + } + cmd = exec.Command(exe) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("executable failed to run: %v\n%s", err, out) + } + if string(out) != "hello\n" { + t.Errorf("unexpected output:\n%s", out) + } +} + func TestIndexMismatch(t *testing.T) { // Test that index mismatch will cause a link-time error (not run-time error). // This shouldn't happen with "go build". We invoke the compiler and the linker @@ -701,11 +730,7 @@ func TestIndexMismatch(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestIndexMismatch") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() aSrc := filepath.Join("testdata", "testIndexMismatch", "a.go") bSrc := filepath.Join("testdata", "testIndexMismatch", "b.go") @@ -753,23 +778,20 @@ func TestIndexMismatch(t *testing.T) { } } -func TestPErsrc(t *testing.T) { +func TestPErsrcBinutils(t *testing.T) { // Test that PE rsrc section is handled correctly (issue 39658). testenv.MustHaveGoBuild(t) - if runtime.GOARCH != "amd64" || runtime.GOOS != "windows" { - t.Skipf("this is a windows/amd64-only test") + if (runtime.GOARCH != "386" && runtime.GOARCH != "amd64") || runtime.GOOS != "windows" { + // This test is limited to amd64 and 386, because binutils is limited as such + t.Skipf("this is only for windows/amd64 and windows/386") } t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestPErsrc") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - pkgdir := filepath.Join("testdata", "testPErsrc") + pkgdir := filepath.Join("testdata", "pe-binutils") exe := filepath.Join(tmpdir, "a.exe") cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", exe) cmd.Dir = pkgdir @@ -787,19 +809,32 @@ func TestPErsrc(t *testing.T) { if !bytes.Contains(b, []byte("Hello Gophers!")) { t.Fatalf("binary does not contain expected content") } +} - pkgdir = filepath.Join("testdata", "testPErsrc-complex") - exe = filepath.Join(tmpdir, "a.exe") - cmd = exec.Command(testenv.GoToolPath(t), "build", "-o", exe) +func TestPErsrcLLVM(t *testing.T) { + // Test that PE rsrc section is handled correctly (issue 39658). 
+ testenv.MustHaveGoBuild(t) + + if runtime.GOOS != "windows" { + t.Skipf("this is a windows-only test") + } + + t.Parallel() + + tmpdir := t.TempDir() + + pkgdir := filepath.Join("testdata", "pe-llvm") + exe := filepath.Join(tmpdir, "a.exe") + cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", exe) cmd.Dir = pkgdir // cmd.Env = append(os.Environ(), "GOOS=windows", "GOARCH=amd64") // uncomment if debugging in a cross-compiling environment - out, err = cmd.CombinedOutput() + out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("building failed: %v, output:\n%s", err, out) } // Check that the binary contains the rsrc data - b, err = ioutil.ReadFile(exe) + b, err := ioutil.ReadFile(exe) if err != nil { t.Fatalf("reading output failed: %v", err) } @@ -814,12 +849,6 @@ func TestContentAddressableSymbols(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestContentAddressableSymbols") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - src := filepath.Join("testdata", "testHashedSyms", "p.go") cmd := exec.Command(testenv.GoToolPath(t), "run", src) out, err := cmd.CombinedOutput() @@ -863,14 +892,10 @@ func TestIssue38554(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestIssue38554") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "x.go") - err = ioutil.WriteFile(src, []byte(testIssue38554Src), 0666) + err := ioutil.WriteFile(src, []byte(testIssue38554Src), 0666) if err != nil { t.Fatalf("failed to write source file: %v", err) } @@ -917,14 +942,10 @@ func TestIssue42396(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestIssue42396") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "main.go") - err = ioutil.WriteFile(src, []byte(testIssue42396src), 0666) + err := ioutil.WriteFile(src, []byte(testIssue42396src), 0666) if err != nil { t.Fatalf("failed to write source file: %v", err) } @@ -974,14 +995,10 @@ func TestLargeReloc(t *testing.T) { testenv.MustHaveGoBuild(t) t.Parallel() - tmpdir, err := ioutil.TempDir("", "TestIssue42396") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() src := filepath.Join(tmpdir, "x.go") - err = ioutil.WriteFile(src, []byte(testLargeRelocSrc), 0666) + err := ioutil.WriteFile(src, []byte(testLargeRelocSrc), 0666) if err != nil { t.Fatalf("failed to write source file: %v", err) } diff --git a/src/cmd/link/linkbig_test.go b/src/cmd/link/linkbig_test.go index 78d2bc1afec..9a4430c162b 100644 --- a/src/cmd/link/linkbig_test.go +++ b/src/cmd/link/linkbig_test.go @@ -10,29 +10,27 @@ package main import ( "bytes" - "cmd/internal/objabi" "fmt" + "internal/buildcfg" "internal/testenv" "io/ioutil" - "os" "os/exec" "testing" ) func TestLargeText(t *testing.T) { - if testing.Short() || (objabi.GOARCH != "ppc64le" && objabi.GOARCH != "ppc64" && objabi.GOARCH != "arm") { - t.Skipf("Skipping large text section test in short mode or on %s", objabi.GOARCH) + if testing.Short() || (buildcfg.GOARCH != "ppc64le" && buildcfg.GOARCH != "ppc64" && buildcfg.GOARCH != "arm") { + t.Skipf("Skipping large text section test in short mode or on %s", buildcfg.GOARCH) } testenv.MustHaveGoBuild(t) var w bytes.Buffer const FN = 4 - tmpdir, err := ioutil.TempDir("", "bigtext") - if err != nil { - t.Fatalf("can't create temp directory: %v\n", err) - } + tmpdir := t.TempDir() - defer os.RemoveAll(tmpdir) + if err := 
ioutil.WriteFile(tmpdir+"/go.mod", []byte("module big_test\n"), 0666); err != nil { + t.Fatal(err) + } // Generate the scenario where the total amount of text exceeds the // limit for the jmp/call instruction, on RISC architectures like ppc64le, @@ -44,7 +42,7 @@ func TestLargeText(t *testing.T) { "ppc64le": "\tMOVD\tR0,R3\n", "arm": "\tMOVW\tR0,R1\n", } - inst := instOnArch[objabi.GOARCH] + inst := instOnArch[buildcfg.GOARCH] for j := 0; j < FN; j++ { testname := fmt.Sprintf("bigfn%d", j) fmt.Fprintf(&w, "TEXT ·%s(SB),$0\n", testname) @@ -79,32 +77,34 @@ func TestLargeText(t *testing.T) { fmt.Fprintf(&w, "\t}\n") fmt.Fprintf(&w, "\tfmt.Printf(\"PASS\\n\")\n") fmt.Fprintf(&w, "}") - err = ioutil.WriteFile(tmpdir+"/bigfn.go", w.Bytes(), 0666) + err := ioutil.WriteFile(tmpdir+"/bigfn.go", w.Bytes(), 0666) if err != nil { t.Fatalf("can't write output: %v\n", err) } // Build and run with internal linking. - os.Chdir(tmpdir) cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "bigtext") + cmd.Dir = tmpdir out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Build failed for big text program with internal linking: %v, output: %s", err, out) } - cmd = exec.Command(tmpdir + "/bigtext") + cmd = exec.Command("./bigtext") + cmd.Dir = tmpdir out, err = cmd.CombinedOutput() if err != nil { t.Fatalf("Program built with internal linking failed to run with err %v, output: %s", err, out) } // Build and run with external linking - os.Chdir(tmpdir) cmd = exec.Command(testenv.GoToolPath(t), "build", "-o", "bigtext", "-ldflags", "'-linkmode=external'") + cmd.Dir = tmpdir out, err = cmd.CombinedOutput() if err != nil { t.Fatalf("Build failed for big text program with external linking: %v, output: %s", err, out) } - cmd = exec.Command(tmpdir + "/bigtext") + cmd = exec.Command("./bigtext") + cmd.Dir = tmpdir out, err = cmd.CombinedOutput() if err != nil { t.Fatalf("Program built with external linking failed to run with err %v, output: %s", err, out) diff --git a/src/cmd/link/main.go b/src/cmd/link/main.go index 6b4ca9706d0..d92478e61e2 100644 --- a/src/cmd/link/main.go +++ b/src/cmd/link/main.go @@ -5,7 +5,6 @@ package main import ( - "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/amd64" "cmd/link/internal/arm" @@ -19,6 +18,7 @@ import ( "cmd/link/internal/wasm" "cmd/link/internal/x86" "fmt" + "internal/buildcfg" "os" ) @@ -40,9 +40,10 @@ func main() { var arch *sys.Arch var theArch ld.Arch - switch objabi.GOARCH { + buildcfg.Check() + switch buildcfg.GOARCH { default: - fmt.Fprintf(os.Stderr, "link: unknown architecture %q\n", objabi.GOARCH) + fmt.Fprintf(os.Stderr, "link: unknown architecture %q\n", buildcfg.GOARCH) os.Exit(2) case "386": arch, theArch = x86.Init() diff --git a/src/cmd/link/testdata/testPErsrc/main.go b/src/cmd/link/testdata/pe-binutils/main.go similarity index 65% rename from src/cmd/link/testdata/testPErsrc/main.go rename to src/cmd/link/testdata/pe-binutils/main.go index 5eb66fb9cce..14ea6f9e0ff 100644 --- a/src/cmd/link/testdata/testPErsrc/main.go +++ b/src/cmd/link/testdata/pe-binutils/main.go @@ -4,10 +4,9 @@ // Test that a PE rsrc section is handled correctly (issue 39658). 
// -// rsrc.syso is created with: -// windres -i a.rc -o rsrc.syso -O coff -// on windows-amd64-2016 builder, where a.rc is a text file with -// the following content: +// rsrc.syso is created using binutils with: +// {x86_64,i686}-w64-mingw32-windres -i a.rc -o rsrc_$GOARCH.syso -O coff +// where a.rc is a text file with the following content: // // resname RCDATA { // "Hello Gophers!\0", diff --git a/src/cmd/link/testdata/pe-binutils/rsrc_386.syso b/src/cmd/link/testdata/pe-binutils/rsrc_386.syso new file mode 100644 index 00000000000..b4abc58abee Binary files /dev/null and b/src/cmd/link/testdata/pe-binutils/rsrc_386.syso differ diff --git a/src/cmd/link/testdata/testPErsrc/rsrc.syso b/src/cmd/link/testdata/pe-binutils/rsrc_amd64.syso similarity index 100% rename from src/cmd/link/testdata/testPErsrc/rsrc.syso rename to src/cmd/link/testdata/pe-binutils/rsrc_amd64.syso diff --git a/src/cmd/link/testdata/testPErsrc-complex/main.go b/src/cmd/link/testdata/pe-llvm/main.go similarity index 92% rename from src/cmd/link/testdata/testPErsrc-complex/main.go rename to src/cmd/link/testdata/pe-llvm/main.go index affd6eada2e..099a71a3fff 100644 --- a/src/cmd/link/testdata/testPErsrc-complex/main.go +++ b/src/cmd/link/testdata/pe-llvm/main.go @@ -6,8 +6,8 @@ // have been created by llvm-rc or msvc's rc.exe, which means there's the // @feat.00 symbol as well as split .rsrc$00 and .rsrc$01 section to deal with. // -// rsrc.syso is created with: -// windres -i a.rc -o rsrc.syso -O coff +// rsrc.syso is created using llvm with: +// {i686,x86_64,armv7,arm64}-w64-mingw32-windres -i a.rc -o rsrc_$GOARCH.syso -O coff // where this windres calls into llvm-rc and llvm-cvtres. The source file, // a.rc, simply contains a reference to its own bytes: // diff --git a/src/cmd/link/testdata/pe-llvm/rsrc_386.syso b/src/cmd/link/testdata/pe-llvm/rsrc_386.syso new file mode 100644 index 00000000000..21126c9954f Binary files /dev/null and b/src/cmd/link/testdata/pe-llvm/rsrc_386.syso differ diff --git a/src/cmd/link/testdata/testPErsrc-complex/rsrc.syso b/src/cmd/link/testdata/pe-llvm/rsrc_amd64.syso similarity index 67% rename from src/cmd/link/testdata/testPErsrc-complex/rsrc.syso rename to src/cmd/link/testdata/pe-llvm/rsrc_amd64.syso index eff630b8a23..56f9260b0a3 100644 Binary files a/src/cmd/link/testdata/testPErsrc-complex/rsrc.syso and b/src/cmd/link/testdata/pe-llvm/rsrc_amd64.syso differ diff --git a/src/cmd/link/testdata/pe-llvm/rsrc_arm.syso b/src/cmd/link/testdata/pe-llvm/rsrc_arm.syso new file mode 100644 index 00000000000..c93a1e9ba0f Binary files /dev/null and b/src/cmd/link/testdata/pe-llvm/rsrc_arm.syso differ diff --git a/src/cmd/link/testdata/pe-llvm/rsrc_arm64.syso b/src/cmd/link/testdata/pe-llvm/rsrc_arm64.syso new file mode 100644 index 00000000000..7849638ddd4 Binary files /dev/null and b/src/cmd/link/testdata/pe-llvm/rsrc_arm64.syso differ diff --git a/src/cmd/nm/nm_cgo_test.go b/src/cmd/nm/nm_cgo_test.go index e0414e6b222..1544be041a8 100644 --- a/src/cmd/nm/nm_cgo_test.go +++ b/src/cmd/nm/nm_cgo_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build cgo // +build cgo package main @@ -28,6 +29,11 @@ func canInternalLink() bool { return false } case "openbsd": + switch runtime.GOARCH { + case "arm64", "mips64": + return false + } + case "windows": switch runtime.GOARCH { case "arm64": return false diff --git a/src/cmd/objdump/objdump_test.go b/src/cmd/objdump/objdump_test.go index 1748e13a537..f231a7c6e0e 100644 --- a/src/cmd/objdump/objdump_test.go +++ b/src/cmd/objdump/objdump_test.go @@ -64,13 +64,13 @@ var x86Need = []string{ // for both 386 and AMD64 } var amd64GnuNeed = []string{ - "movq", + "jmp", "callq", "cmpb", } var i386GnuNeed = []string{ - "mov", + "jmp", "call", "cmp", } @@ -345,3 +345,18 @@ func TestGoobjFileNumber(t *testing.T) { t.Logf("output:\n%s", text) } } + +func TestGoObjOtherVersion(t *testing.T) { + testenv.MustHaveExec(t) + t.Parallel() + + obj := filepath.Join("testdata", "go116.o") + cmd := exec.Command(exe, obj) + out, err := cmd.CombinedOutput() + if err == nil { + t.Fatalf("objdump go116.o succeeded unexpectedly") + } + if !strings.Contains(string(out), "go object of a different version") { + t.Errorf("unexpected error message:\n%s", out) + } +} diff --git a/src/cmd/objdump/testdata/go116.o b/src/cmd/objdump/testdata/go116.o new file mode 100644 index 00000000000..6434d5c8cff Binary files /dev/null and b/src/cmd/objdump/testdata/go116.o differ diff --git a/src/cmd/pack/pack_test.go b/src/cmd/pack/pack_test.go index 118376f9df6..7842b562dc3 100644 --- a/src/cmd/pack/pack_test.go +++ b/src/cmd/pack/pack_test.go @@ -19,15 +19,6 @@ import ( "time" ) -// tmpDir creates a temporary directory and returns its name. -func tmpDir(t *testing.T) string { - name, err := os.MkdirTemp("", "pack") - if err != nil { - t.Fatal(err) - } - return name -} - // testCreate creates an archive in the specified directory. func testCreate(t *testing.T, dir string) { name := filepath.Join(dir, "pack.a") @@ -57,15 +48,13 @@ // Test that we can create an archive, write to it, and get the same contents back. // Tests the rv and then the pv command on a new archive. func TestCreate(t *testing.T) { - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() testCreate(t, dir) } // Test that we can create an archive twice with the same name (Issue 8369). func TestCreateTwice(t *testing.T) { - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() testCreate(t, dir) testCreate(t, dir) } @@ -73,8 +62,7 @@ func TestCreateTwice(t *testing.T) { // Test that we can create an archive, put some files in it, and get back a correct listing. // Tests the tv command. func TestTableOfContents(t *testing.T) { - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() name := filepath.Join(dir, "pack.a") ar := openArchive(name, os.O_RDWR|os.O_CREATE, nil) @@ -131,8 +119,7 @@ func TestTableOfContents(t *testing.T) { // Test that we can create an archive, put some files in it, and get back a file. // Tests the x command. func TestExtract(t *testing.T) { - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() name := filepath.Join(dir, "pack.a") ar := openArchive(name, os.O_RDWR|os.O_CREATE, nil) // Add some entries by hand.
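A note on the recurring cleanup pattern in the test changes above: the hand-rolled ioutil.TempDir / defer os.RemoveAll pairs (and pack's local tmpDir helper) are replaced by t.TempDir(), which creates a per-test directory and deletes it automatically when the test finishes. A minimal sketch of the resulting shape; the test name and file contents are illustrative, not taken from this commit:

package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestUsesTempDir(t *testing.T) {
	// Before: dir, err := ioutil.TempDir("", "pack"); defer os.RemoveAll(dir)
	// After: the testing package owns both creation and cleanup.
	dir := t.TempDir()

	src := filepath.Join(dir, "p.go")
	if err := os.WriteFile(src, []byte("package p\n"), 0666); err != nil {
		t.Fatalf("failed to write source file: %v", err)
	}
}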
@@ -173,8 +160,7 @@ func TestExtract(t *testing.T) { func TestHello(t *testing.T) { testenv.MustHaveGoBuild(t) - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() hello := filepath.Join(dir, "hello.go") prog := ` package main @@ -209,8 +195,7 @@ func TestLargeDefs(t *testing.T) { } testenv.MustHaveGoBuild(t) - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() large := filepath.Join(dir, "large.go") f, err := os.Create(large) if err != nil { @@ -276,8 +261,7 @@ func TestLargeDefs(t *testing.T) { func TestIssue21703(t *testing.T) { testenv.MustHaveGoBuild(t) - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() const aSrc = `package a; const X = "\n!\n"` err := os.WriteFile(filepath.Join(dir, "a.go"), []byte(aSrc), 0666) @@ -307,8 +291,7 @@ func TestIssue21703(t *testing.T) { func TestCreateWithCompilerObj(t *testing.T) { testenv.MustHaveGoBuild(t) - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() src := filepath.Join(dir, "p.go") prog := "package p; var X = 42\n" err := os.WriteFile(src, []byte(prog), 0666) @@ -372,8 +355,7 @@ func TestCreateWithCompilerObj(t *testing.T) { func TestRWithNonexistentFile(t *testing.T) { testenv.MustHaveGoBuild(t) - dir := tmpDir(t) - defer os.RemoveAll(dir) + dir := t.TempDir() src := filepath.Join(dir, "p.go") prog := "package p; var X = 42\n" err := os.WriteFile(src, []byte(prog), 0666) diff --git a/src/cmd/pprof/pprof.go b/src/cmd/pprof/pprof.go index 11f91cbedb9..1d10a7b41f3 100644 --- a/src/cmd/pprof/pprof.go +++ b/src/cmd/pprof/pprof.go @@ -232,9 +232,9 @@ func (f *file) Name() string { return f.name } -func (f *file) Base() uint64 { +func (f *file) ObjAddr(addr uint64) (uint64, error) { // No support for shared libraries. - return 0 + return addr, nil } func (f *file) BuildID() string { diff --git a/src/cmd/pprof/readlineui.go b/src/cmd/pprof/readlineui.go index 0c9fafdad75..f46e934e0f3 100644 --- a/src/cmd/pprof/readlineui.go +++ b/src/cmd/pprof/readlineui.go @@ -5,6 +5,7 @@ // This file contains a driver.UI implementation // that provides the readline functionality if possible. +//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows) && !appengine && !android // +build darwin dragonfly freebsd linux netbsd openbsd solaris windows // +build !appengine // +build !android @@ -18,7 +19,7 @@ import ( "strings" "github.com/google/pprof/driver" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" ) func init() { @@ -26,11 +27,11 @@ } // readlineUI implements driver.UI interface using the -// golang.org/x/crypto/ssh/terminal package. +// golang.org/x/term package. // The upstream pprof command implements the same functionality // using the github.com/chzyer/readline package. type readlineUI struct { - term *terminal.Terminal + term *term.Terminal } func newReadlineUI() driver.UI { @@ -38,19 +39,19 @@ if v := strings.ToLower(os.Getenv("TERM")); v == "" || v == "dumb" { return nil } - // test if we can use terminal.ReadLine + // test if we can use term.ReadLine // that assumes operation in the raw mode. - oldState, err := terminal.MakeRaw(0) + oldState, err := term.MakeRaw(0) if err != nil { return nil } - terminal.Restore(0, oldState) + term.Restore(0, oldState) rw := struct { io.Reader io.Writer }{os.Stdin, os.Stderr} - return &readlineUI{term: terminal.NewTerminal(rw, "")} + return &readlineUI{term: term.NewTerminal(rw, "")} } // Read returns a line of text (a command) read from the user.
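The pprof.go hunk above is one piece of a larger interface change in this patch: plugin.ObjFile's fixed Base() uint64 is replaced by ObjAddr(addr), which translates one runtime address at a time so an implementation can pick the correct program segment (and therefore the correct base) per address. For a file with a single known relocation base, the translation degenerates to the old subtraction; a sketch under that assumption (the fixedBase type is hypothetical, not part of pprof):

package example

// fixedBase stands in for an ObjFile implementation whose whole text
// is relocated by one constant, as the old Base() API assumed.
type fixedBase struct {
	base uint64
}

// ObjAddr maps a runtime address to an object-file address. Callers
// previously computed addr - f.Base() themselves at every use site.
func (f fixedBase) ObjAddr(addr uint64) (uint64, error) {
	return addr - f.base, nil
}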
@@ -60,8 +61,8 @@ func (r *readlineUI) ReadLine(prompt string) (string, error) { // skip error checking because we tested it // when creating this readlineUI initially. - oldState, _ := terminal.MakeRaw(0) - defer terminal.Restore(0, oldState) + oldState, _ := term.MakeRaw(0) + defer term.Restore(0, oldState) s, err := r.term.ReadLine() return s, err @@ -105,7 +106,7 @@ func colorize(msg string) string { // interactive terminal (as opposed to being redirected to a file). func (r *readlineUI) IsTerminal() bool { const stdout = 1 - return terminal.IsTerminal(stdout) + return term.IsTerminal(stdout) } // WantBrowser indicates whether browser should be opened with the -http option. diff --git a/src/cmd/trace/annotations_test.go b/src/cmd/trace/annotations_test.go index 9c2d0273668..acd5693c7d5 100644 --- a/src/cmd/trace/annotations_test.go +++ b/src/cmd/trace/annotations_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !js // +build !js package main diff --git a/src/cmd/trace/mmu.go b/src/cmd/trace/mmu.go index b92fac652cc..1d1fd2ea947 100644
[The two templMMU hunks of this mmu.go change (@@ -283,7 +283,7 @@ and @@ -328,27 +328,27 @@) edit HTML markup embedded in the template string: the fragment appended via details.append in the worst-windows loop, the "Loading plot..." placeholder, and the "View" (whole-system vs. per-goroutine utilization), "Include" (stop-the-world, background workers, mark assists, sweep), and "Display" (percentile) option groups with their "?" help tooltips. The HTML tags in these hunks were stripped when the diff was extracted, so the exact markup changes are not recoverable here.]
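The +//go:build lines added to annotations_test.go above (and to the trace and nm test files elsewhere in this patch) follow the Go 1.17 constraint transition: the new expression-based line is placed immediately above the legacy // +build line(s), and the two forms must select exactly the same configurations. A hand-written illustration of the correspondence, not taken from any file in this commit:

// Old syntax: within one +build line, spaces mean OR and commas mean AND;
// separate +build lines are ANDed together. The //go:build form spells this
// out with ordinary boolean operators, so the three lines below all express
// one constraint: (linux || darwin) && !cgo.

//go:build (linux || darwin) && !cgo
// +build linux darwin
// +build !cgo

package example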
    diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go index 30c80f0e047..ca10736c320 100644 --- a/src/cmd/trace/trace.go +++ b/src/cmd/trace/trace.go @@ -627,7 +627,7 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { } case trace.EvHeapAlloc: ctx.heapStats.heapAlloc = ev.Args[0] - case trace.EvNextGC: + case trace.EvHeapGoal: ctx.heapStats.nextGC = ev.Args[0] } if setGStateErr != nil { diff --git a/src/cmd/trace/trace_test.go b/src/cmd/trace/trace_test.go index ea0cc6f8800..2b1a68d7f3d 100644 --- a/src/cmd/trace/trace_test.go +++ b/src/cmd/trace/trace_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !js // +build !js package main diff --git a/src/cmd/trace/trace_unix_test.go b/src/cmd/trace/trace_unix_test.go index c569b40bb24..8dc56a8c7be 100644 --- a/src/cmd/trace/trace_unix_test.go +++ b/src/cmd/trace/trace_unix_test.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package main diff --git a/src/cmd/vendor/github.com/google/pprof/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/driver/driver.go index e65bc2f417d..fc05f919baf 100644 --- a/src/cmd/vendor/github.com/google/pprof/driver/driver.go +++ b/src/cmd/vendor/github.com/google/pprof/driver/driver.go @@ -159,8 +159,8 @@ type ObjFile interface { // Name returns the underlying file name, if available. Name() string - // Base returns the base address to use when looking up symbols in the file. - Base() uint64 + // ObjAddr returns the objdump address corresponding to a runtime address. + ObjAddr(addr uint64) (uint64, error) // BuildID returns the GNU build ID of the file, or an empty string. BuildID() string diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go index c0661bf4aa9..0c702398d3a 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go @@ -70,7 +70,11 @@ func (a *addr2LinerJob) write(s string) error { } func (a *addr2LinerJob) readLine() (string, error) { - return a.out.ReadString('\n') + s, err := a.out.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(s), nil } // close releases any resources used by the addr2liner object. @@ -115,19 +119,11 @@ func newAddr2Liner(cmd, file string, base uint64) (*addr2Liner, error) { return a, nil } -func (d *addr2Liner) readString() (string, error) { - s, err := d.rw.readLine() - if err != nil { - return "", err - } - return strings.TrimSpace(s), nil -} - // readFrame parses the addr2line output for a single address. It // returns a populated plugin.Frame and whether it has reached the end of the // data. func (d *addr2Liner) readFrame() (plugin.Frame, bool) { - funcname, err := d.readString() + funcname, err := d.rw.readLine() if err != nil { return plugin.Frame{}, true } @@ -135,12 +131,12 @@ func (d *addr2Liner) readFrame() (plugin.Frame, bool) { // If addr2line returns a hex address we can assume it is the // sentinel. 
Read and ignore next two lines of output from // addr2line - d.readString() - d.readString() + d.rw.readLine() + d.rw.readLine() return plugin.Frame{}, true } - fileline, err := d.readString() + fileline, err := d.rw.readLine() if err != nil { return plugin.Frame{}, true } @@ -186,7 +182,7 @@ func (d *addr2Liner) rawAddrInfo(addr uint64) ([]plugin.Frame, error) { return nil, err } - resp, err := d.readString() + resp, err := d.rw.readLine() if err != nil { return nil, err } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go index 68fa5593ad1..24c48e649b8 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go @@ -43,15 +43,21 @@ type llvmSymbolizerJob struct { cmd *exec.Cmd in io.WriteCloser out *bufio.Reader + // llvm-symbolizer requires the symbol type, CODE or DATA, for symbolization. + symType string } func (a *llvmSymbolizerJob) write(s string) error { - _, err := fmt.Fprint(a.in, s+"\n") + _, err := fmt.Fprintln(a.in, a.symType, s) return err } func (a *llvmSymbolizerJob) readLine() (string, error) { - return a.out.ReadString('\n') + s, err := a.out.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(s), nil } // close releases any resources used by the llvmSymbolizer object. @@ -64,13 +70,17 @@ func (a *llvmSymbolizerJob) close() { // information about the given executable file. If file is a shared // library, base should be the address at which it was mapped in the // program under consideration. -func newLLVMSymbolizer(cmd, file string, base uint64) (*llvmSymbolizer, error) { +func newLLVMSymbolizer(cmd, file string, base uint64, isData bool) (*llvmSymbolizer, error) { if cmd == "" { cmd = defaultLLVMSymbolizer } j := &llvmSymbolizerJob{ - cmd: exec.Command(cmd, "-inlining", "-demangle=false"), + cmd: exec.Command(cmd, "-inlining", "-demangle=false"), + symType: "CODE", + } + if isData { + j.symType = "DATA" } var err error @@ -97,19 +107,11 @@ func newLLVMSymbolizer(cmd, file string, base uint64) (*llvmSymbolizer, error) { return a, nil } -func (d *llvmSymbolizer) readString() (string, error) { - s, err := d.rw.readLine() - if err != nil { - return "", err - } - return strings.TrimSpace(s), nil -} - // readFrame parses the llvm-symbolizer output for a single address. It // returns a populated plugin.Frame and whether it has reached the end of the // data. func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) { - funcname, err := d.readString() + funcname, err := d.rw.readLine() if err != nil { return plugin.Frame{}, true } @@ -121,13 +123,17 @@ func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) { funcname = "" } - fileline, err := d.readString() + fileline, err := d.rw.readLine() if err != nil { return plugin.Frame{Func: funcname}, true } linenumber := 0 - if fileline == "??:0" { + // The llvm-symbolizer outputs the <file_name>:<line_number>:<column_number>. + // When it cannot identify the source code location, it outputs "??:0:0". + // Older versions output just the filename and line number, so we check for + // both conditions here.
+ if fileline == "??:0" || fileline == "??:0:0" { fileline = "" } else { switch split := strings.Split(fileline, ":"); len(split) { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go index 1987bd3daba..8e0ccc728d5 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go @@ -29,27 +29,42 @@ const ( defaultNM = "nm" ) -// addr2LinerNM is a connection to an nm command for obtaining address +// addr2LinerNM is a connection to an nm command for obtaining symbol // information from a binary. type addr2LinerNM struct { - m []symbolInfo // Sorted list of addresses from binary. + m []symbolInfo // Sorted list of symbol addresses from binary. } type symbolInfo struct { address uint64 + size uint64 name string + symType string } -// newAddr2LinerNM starts the given nm command reporting information about the -// given executable file. If file is a shared library, base should be -// the address at which it was mapped in the program under -// consideration. +// isData returns if the symbol has a known data object symbol type. +func (s *symbolInfo) isData() bool { + // The following symbol types are taken from https://linux.die.net/man/1/nm: + // Lowercase letter means local symbol, uppercase denotes a global symbol. + // - b or B: the symbol is in the uninitialized data section, e.g. .bss; + // - d or D: the symbol is in the initialized data section; + // - r or R: the symbol is in a read only data section; + // - v or V: the symbol is a weak object; + // - W: the symbol is a weak symbol that has not been specifically tagged as a + // weak object symbol. Experiments with some binaries showed these to be + // mostly data objects. + return strings.ContainsAny(s.symType, "bBdDrRvVW") +} + +// newAddr2LinerNM starts the given nm command reporting information about the +// given executable file. If file is a shared library, base should be the +// address at which it was mapped in the program under consideration. func newAddr2LinerNM(cmd, file string, base uint64) (*addr2LinerNM, error) { if cmd == "" { cmd = defaultNM } var b bytes.Buffer - c := exec.Command(cmd, "-n", file) + c := exec.Command(cmd, "--numeric-sort", "--print-size", "--format=posix", file) c.Stdout = &b if err := c.Run(); err != nil { return nil, err @@ -74,17 +89,23 @@ func parseAddr2LinerNM(base uint64, nm io.Reader) (*addr2LinerNM, error) { return nil, err } line = strings.TrimSpace(line) - fields := strings.SplitN(line, " ", 3) - if len(fields) != 3 { + fields := strings.Split(line, " ") + if len(fields) != 4 { continue } - address, err := strconv.ParseUint(fields[0], 16, 64) + address, err := strconv.ParseUint(fields[2], 16, 64) + if err != nil { + continue + } + size, err := strconv.ParseUint(fields[3], 16, 64) if err != nil { continue } a.m = append(a.m, symbolInfo{ address: address + base, - name: fields[2], + size: size, + name: fields[0], + symType: fields[1], }) } @@ -94,7 +115,7 @@ func parseAddr2LinerNM(base uint64, nm io.Reader) (*addr2LinerNM, error) { // addrInfo returns the stack frame information for a specific program // address. It returns nil if the address could not be identified.
func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) { - if len(a.m) == 0 || addr < a.m[0].address || addr > a.m[len(a.m)-1].address { + if len(a.m) == 0 || addr < a.m[0].address || addr >= (a.m[len(a.m)-1].address+a.m[len(a.m)-1].size) { return nil, nil } @@ -113,12 +134,11 @@ func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) { } } - // Address is between a.m[low] and a.m[high]. - // Pick low, as it represents [low, high). - f := []plugin.Frame{ - { - Func: a.m[low].name, - }, + // Address is between a.m[low] and a.m[high]. Pick low, as it represents + // [low, high). For data symbols, we use a strict check that the address is in + // the [start, start + size) range of a.m[low]. + if a.m[low].isData() && addr >= (a.m[low].address+a.m[low].size) { + return nil, nil } - return f, nil + return []plugin.Frame{{Func: a.m[low].name}}, nil } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go index 4b67cc4ab05..5ed8a1f9f1e 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go @@ -18,6 +18,7 @@ package binutils import ( "debug/elf" "debug/macho" + "debug/pe" "encoding/binary" "errors" "fmt" @@ -41,7 +42,12 @@ type Binutils struct { rep *binrep } -var objdumpLLVMVerRE = regexp.MustCompile(`LLVM version (?:(\d*)\.(\d*)\.(\d*)|.*(trunk).*)`) +var ( + objdumpLLVMVerRE = regexp.MustCompile(`LLVM version (?:(\d*)\.(\d*)\.(\d*)|.*(trunk).*)`) + + // Defined for testing + elfOpen = elf.Open +) // binrep is an immutable representation for Binutils. It is atomically // replaced on every mutation to provide thread-safe access. @@ -255,7 +261,7 @@ func (bu *Binutils) Disasm(file string, start, end uint64, intelSyntax bool) ([] if !b.objdumpFound { return nil, errors.New("cannot disasm: no objdump tool available") } - args := []string{"--disassemble-all", "--demangle", "--no-show-raw-insn", + args := []string{"--disassemble", "--demangle", "--no-show-raw-insn", "--line-numbers", fmt.Sprintf("--start-address=%#x", start), fmt.Sprintf("--stop-address=%#x", end)} @@ -337,6 +343,15 @@ func (bu *Binutils) Open(name string, start, limit, offset uint64) (plugin.ObjFi return f, nil } + peMagic := string(header[:2]) + if peMagic == "MZ" { + f, err := b.openPE(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading PE file %s: %v", name, err) + } + return f, nil + } + return nil, fmt.Errorf("unrecognized binary format: %s", name) } @@ -411,14 +426,23 @@ func (b *binrep) openMachO(name string, start, limit, offset uint64) (plugin.Obj } func (b *binrep) openELF(name string, start, limit, offset uint64) (plugin.ObjFile, error) { - ef, err := elf.Open(name) + ef, err := elfOpen(name) if err != nil { return nil, fmt.Errorf("error parsing %s: %v", name, err) } defer ef.Close() - var stextOffset *uint64 - var pageAligned = func(addr uint64) bool { return addr%4096 == 0 } + buildID := "" + if f, err := os.Open(name); err == nil { + if id, err := elfexec.GetBuildID(f); err == nil { + buildID = fmt.Sprintf("%x", id) + } + } + + var ( + stextOffset *uint64 + pageAligned = func(addr uint64) bool { return addr%4096 == 0 } + ) if strings.Contains(name, "vmlinux") || !pageAligned(start) || !pageAligned(limit) || !pageAligned(offset) { // Reading all Symbols is expensive, and we only rarely need it so // we don't want to do it every time. 
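Stepping back to the addr2liner_nm.go change just above: with --format=posix, nm emits one symbol per line as "name type address size", which is exactly what the new parseAddr2LinerNM indexes (fields[0] name, fields[1] type, fields[2] address, fields[3] size). A self-contained sketch of parsing one such line; the symbol and numbers are invented:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical line from: nm --numeric-sort --print-size --format=posix a.out
	line := "main.main T 0000000000457f60 00000000000000c8"
	fields := strings.Split(line, " ")
	if len(fields) != 4 {
		return
	}
	addr, _ := strconv.ParseUint(fields[2], 16, 64)
	size, _ := strconv.ParseUint(fields[3], 16, 64)
	// "T" is a global text (code) symbol; types in "bBdDrRvVW" would be
	// treated as data by the new isData check.
	fmt.Printf("name=%s type=%s addr=%#x size=%#x\n", fields[0], fields[1], addr, size)
}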
But if _stext happens to be @@ -440,37 +464,171 @@ func (b *binrep) openELF(name string, start, limit, offset uint64) (plugin.ObjFi } } - base, err := elfexec.GetBase(&ef.FileHeader, elfexec.FindTextProgHeader(ef), stextOffset, start, limit, offset) - if err != nil { + // Check that we can compute a base for the binary. This may not be the + // correct base value, so we don't save it. We delay computing the actual base + // value until we have a sample address for this mapping, so that we can + // correctly identify the associated program segment that is needed to compute + // the base. + if _, err := elfexec.GetBase(&ef.FileHeader, elfexec.FindTextProgHeader(ef), stextOffset, start, limit, offset); err != nil { return nil, fmt.Errorf("could not identify base for %s: %v", name, err) } - buildID := "" - if f, err := os.Open(name); err == nil { - if id, err := elfexec.GetBuildID(f); err == nil { - buildID = fmt.Sprintf("%x", id) - } + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{ + b: b, + name: name, + buildID: buildID, + m: &elfMapping{start: start, limit: limit, offset: offset, stextOffset: stextOffset}, + }}, nil + } + return &fileAddr2Line{file: file{ + b: b, + name: name, + buildID: buildID, + m: &elfMapping{start: start, limit: limit, offset: offset, stextOffset: stextOffset}, + }}, nil +} + +func (b *binrep) openPE(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + pf, err := pe.Open(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer pf.Close() + + var imageBase uint64 + switch h := pf.OptionalHeader.(type) { + case *pe.OptionalHeader32: + imageBase = uint64(h.ImageBase) + case *pe.OptionalHeader64: + imageBase = uint64(h.ImageBase) + default: + return nil, fmt.Errorf("unknown OptionalHeader %T", pf.OptionalHeader) + } + + var base uint64 + if start > 0 { + base = start - imageBase } if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { - return &fileNM{file: file{b, name, base, buildID}}, nil + return &fileNM{file: file{b: b, name: name, base: base}}, nil } - return &fileAddr2Line{file: file{b, name, base, buildID}}, nil + return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil +} + +// elfMapping stores the parameters of a runtime mapping that are needed to +// identify the ELF segment associated with a mapping. +type elfMapping struct { + // Runtime mapping parameters. + start, limit, offset uint64 + // Offset of _stext symbol. Only defined for kernel images, nil otherwise. + stextOffset *uint64 } // file implements the binutils.ObjFile interface. type file struct { b *binrep name string - base uint64 buildID string + + baseOnce sync.Once // Ensures the base, baseErr and isData are computed once. + base uint64 + baseErr error // Any eventual error while computing the base. + isData bool + // Mapping information. Relevant only for ELF files, nil otherwise. + m *elfMapping +} + +// computeBase computes the relocation base for the given binary file only if +// the elfMapping field is set. It populates the base and isData fields and +// returns an error. 
+func (f *file) computeBase(addr uint64) error { + if f == nil || f.m == nil { + return nil + } + if addr < f.m.start || addr >= f.m.limit { + return fmt.Errorf("specified address %x is outside the mapping range [%x, %x] for file %q", addr, f.m.start, f.m.limit, f.name) + } + ef, err := elfOpen(f.name) + if err != nil { + return fmt.Errorf("error parsing %s: %v", f.name, err) + } + defer ef.Close() + + var ph *elf.ProgHeader + // For user space executables, find the actual program segment that is + // associated with the given mapping. Skip this search if limit <= start. + // We cannot use just a check on the start address of the mapping to tell if + // it's a kernel / .ko module mapping, because with quipper address remapping + // enabled, the address would be in the lower half of the address space. + if f.m.stextOffset == nil && f.m.start < f.m.limit && f.m.limit < (uint64(1)<<63) { + // Get all program headers associated with the mapping. + headers, hasLoadables := elfexec.ProgramHeadersForMapping(ef, f.m.offset, f.m.limit-f.m.start) + + // Some ELF files don't contain any loadable program segments, e.g. .ko + // kernel modules. It's not an error to have no header in such cases. + if hasLoadables { + ph, err = matchUniqueHeader(headers, addr-f.m.start+f.m.offset) + if err != nil { + return fmt.Errorf("failed to find program header for file %q, ELF mapping %#v, address %x: %v", f.name, *f.m, addr, err) + } + } + } else { + // For the kernel, find the program segment that includes the .text section. + ph = elfexec.FindTextProgHeader(ef) + } + + base, err := elfexec.GetBase(&ef.FileHeader, ph, f.m.stextOffset, f.m.start, f.m.limit, f.m.offset) + if err != nil { + return err + } + f.base = base + f.isData = ph != nil && ph.Flags&elf.PF_X == 0 + return nil +} + +// matchUniqueHeader attempts to identify a unique header from the given list, +// using the given file offset to disambiguate between multiple segments. It +// returns an error if the header list is empty or if it cannot identify a +// unique header. +func matchUniqueHeader(headers []*elf.ProgHeader, fileOffset uint64) (*elf.ProgHeader, error) { + if len(headers) == 0 { + return nil, errors.New("no program header matches mapping info") + } + if len(headers) == 1 { + // Don't use the file offset if we already have a single header. + return headers[0], nil + } + // We have multiple input segments. Attempt to identify a unique one + // based on the given file offset. + var ph *elf.ProgHeader + for _, h := range headers { + if fileOffset >= h.Off && fileOffset < h.Off+h.Memsz { + if ph != nil { + // Assuming no other bugs, this can only happen if we have two or + // more small program segments that fit on the same page, and a + // segment other than the last one includes uninitialized data. + return nil, fmt.Errorf("found second program header (%#v) that matches file offset %x, first program header is %#v. 
Does first program segment contain uninitialized data?", *h, fileOffset, *ph) + } + ph = h + } + } + if ph == nil { + return nil, fmt.Errorf("no program header matches file offset %x", fileOffset) + } + return ph, nil } func (f *file) Name() string { return f.name } -func (f *file) Base() uint64 { - return f.base +func (f *file) ObjAddr(addr uint64) (uint64, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return 0, f.baseErr + } + return addr - f.base, nil } func (f *file) BuildID() string { @@ -478,7 +636,11 @@ func (f *file) BuildID() string { } func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) { - return []plugin.Frame{}, nil + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } + return nil, nil } func (f *file) Close() error { @@ -505,6 +667,10 @@ type fileNM struct { } func (f *fileNM) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } if f.addr2linernm == nil { addr2liner, err := newAddr2LinerNM(f.b.nm, f.name, f.base) if err != nil { @@ -524,9 +690,14 @@ type fileAddr2Line struct { file addr2liner *addr2Liner llvmSymbolizer *llvmSymbolizer + isData bool } func (f *fileAddr2Line) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } f.once.Do(f.init) if f.llvmSymbolizer != nil { return f.llvmSymbolizer.addrInfo(addr) @@ -538,7 +709,7 @@ func (f *fileAddr2Line) SourceLine(addr uint64) ([]plugin.Frame, error) { } func (f *fileAddr2Line) init() { - if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base); err == nil { + if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base, f.isData); err == nil { f.llvmSymbolizer = llvmSymbolizer return } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go index d0be614bdc4..e64adf58cd6 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go @@ -19,6 +19,7 @@ import ( "io" "regexp" "strconv" + "strings" "github.com/google/pprof/internal/plugin" "github.com/ianlancetaylor/demangle" @@ -121,6 +122,7 @@ func disassemble(asm []byte) ([]plugin.Inst, error) { break } } + input = strings.TrimSpace(input) if fields := objdumpAsmOutputRE.FindStringSubmatch(input); len(fields) == 3 { if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil { @@ -167,6 +169,7 @@ func nextSymbol(buf *bytes.Buffer) (uint64, string, error) { return 0, "", err } } + line = strings.TrimSpace(line) if fields := nmOutputRE.FindStringSubmatch(line); len(fields) == 4 { if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go index 878f2e1ead1..3967a12d45a 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go @@ -163,7 +163,7 @@ func applyCommandOverrides(cmd string, outputFormat int, cfg config) config { trim := cfg.Trim switch cmd { - case "disasm", "weblist": + case "disasm": trim = false cfg.Granularity = "addresses" // Force the 'noinlines' mode so that 
source locations for a given address @@ -172,6 +172,10 @@ func applyCommandOverrides(cmd string, outputFormat int, cfg config) config { // This is because the merge is done by address and in case of an inlined // stack each of the inlined entries is a separate callgraph node. cfg.NoInlines = true + case "weblist": + trim = false + cfg.Granularity = "addresses" + cfg.NoInlines = false // Need inline info to support call expansion case "peek": trim = false case "list": diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go index 4f7610c7e54..b8e8b50b94d 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go @@ -62,6 +62,7 @@ a { .header .title h1 { font-size: 1.75em; margin-right: 1rem; + margin-bottom: 4px; } .header .title a { color: #212121; } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go index d520765cc91..2638b2db2d9 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go @@ -283,3 +283,72 @@ func FindTextProgHeader(f *elf.File) *elf.ProgHeader { } return nil } + +// ProgramHeadersForMapping returns the loadable program segment headers that +// are fully contained in the runtime mapping with file offset pgoff and memory +// size memsz, and if the binary includes any loadable segments. +func ProgramHeadersForMapping(f *elf.File, pgoff, memsz uint64) ([]*elf.ProgHeader, bool) { + const ( + // pageSize defines the virtual memory page size used by the loader. This + // value is dependent on the memory management unit of the CPU. The page + // size is 4KB virtually on all the architectures that we care about, so we + // define this metric as a constant. If we encounter architectures where + // page size is not 4KB, we must try to guess the page size on the system + // where the profile was collected, possibly using the architecture + // specified in the ELF file header. + pageSize = 4096 + pageOffsetMask = pageSize - 1 + pageMask = ^uint64(pageOffsetMask) + ) + var headers []*elf.ProgHeader + hasLoadables := false + for _, p := range f.Progs { + // The segment must be fully included in the mapping. + if p.Type == elf.PT_LOAD && pgoff <= p.Off && p.Off+p.Memsz <= pgoff+memsz { + alignedOffset := uint64(0) + if p.Off > (p.Vaddr & pageOffsetMask) { + alignedOffset = p.Off - (p.Vaddr & pageOffsetMask) + } + if alignedOffset <= pgoff { + headers = append(headers, &p.ProgHeader) + } + } + if p.Type == elf.PT_LOAD { + hasLoadables = true + } + } + if len(headers) < 2 { + return headers, hasLoadables + } + + // If we have more than one matching segment, try a strict check on the + // segment memory size. We use a heuristic to compute the minimum mapping size + // required for a segment, assuming mappings are page aligned. + // The memory size based heuristic makes sense only if the mapping size is a + // multiple of page size. + if memsz%pageSize != 0 { + return headers, hasLoadables + } + + // Return all found headers if we cannot narrow the selection to a single + // program segment.
+ var ph *elf.ProgHeader + for _, h := range headers { + wantSize := (h.Vaddr+h.Memsz+pageSize-1)&pageMask - (h.Vaddr & pageMask) + if wantSize != memsz { + continue + } + if ph != nil { + // Found a second program header matching, so return all previously + // identified headers. + return headers, hasLoadables + } + ph = h + } + if ph == nil { + // No matching header for the strict check. Return all previously identified + // headers. + return headers, hasLoadables + } + return []*elf.ProgHeader{ph}, hasLoadables +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go index 8cb87da9af9..80086752484 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go @@ -322,8 +322,8 @@ func (b *builder) addEdge(edge *Edge, from, to int, hasNodelets bool) { } // dotColor returns a color for the given score (between -1.0 and -// 1.0), with -1.0 colored red, 0.0 colored grey, and 1.0 colored -// green. If isBackground is true, then a light (low-saturation) +// 1.0), with -1.0 colored green, 0.0 colored grey, and 1.0 colored +// red. If isBackground is true, then a light (low-saturation) // color is returned (suitable for use as a background color); // otherwise, a darker color is returned (suitable for use as a // foreground color). diff --git a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go index e95b261bc25..53325740a3e 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go @@ -111,8 +111,9 @@ func compatibleValueTypes(v1, v2 *profile.ValueType) bool { } return v1.Unit == v2.Unit || - (isTimeUnit(v1.Unit) && isTimeUnit(v2.Unit)) || - (isMemoryUnit(v1.Unit) && isMemoryUnit(v2.Unit)) + (timeUnits.sniffUnit(v1.Unit) != nil && timeUnits.sniffUnit(v2.Unit) != nil) || + (memoryUnits.sniffUnit(v1.Unit) != nil && memoryUnits.sniffUnit(v2.Unit) != nil) || + (gcuUnits.sniffUnit(v1.Unit) != nil && gcuUnits.sniffUnit(v2.Unit) != nil) } // Scale a measurement from a unit to a different unit and returns @@ -124,12 +125,15 @@ func Scale(value int64, fromUnit, toUnit string) (float64, string) { v, u := Scale(-value, fromUnit, toUnit) return -v, u } - if m, u, ok := memoryLabel(value, fromUnit, toUnit); ok { + if m, u, ok := memoryUnits.convertUnit(value, fromUnit, toUnit); ok { return m, u } - if t, u, ok := timeLabel(value, fromUnit, toUnit); ok { + if t, u, ok := timeUnits.convertUnit(value, fromUnit, toUnit); ok { return t, u } + if g, u, ok := gcuUnits.convertUnit(value, fromUnit, toUnit); ok { + return g, u + } // Skip non-interesting units. switch toUnit { case "count", "sample", "unit", "minimum", "auto": @@ -172,157 +176,121 @@ func Percentage(value, total int64) string { } } -// isMemoryUnit returns whether a name is recognized as a memory size -// unit. -func isMemoryUnit(unit string) bool { - switch strings.TrimSuffix(strings.ToLower(unit), "s") { - case "byte", "b", "kilobyte", "kb", "megabyte", "mb", "gigabyte", "gb": - return true - } - return false +// unit includes a list of aliases representing a specific unit and a factor +// which one can multiply a value in the specified unit by to get the value +// in terms of the base unit.
+type unit struct { + canonicalName string + aliases []string + factor float64 } -func memoryLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) { - fromUnit = strings.TrimSuffix(strings.ToLower(fromUnit), "s") - toUnit = strings.TrimSuffix(strings.ToLower(toUnit), "s") +// unitType includes a list of units that are within the same category (i.e. +// memory or time units) and a default unit to use for this type of unit. +type unitType struct { + defaultUnit unit + units []unit +} - switch fromUnit { - case "byte", "b": - case "kb", "kbyte", "kilobyte": - value *= 1024 - case "mb", "mbyte", "megabyte": - value *= 1024 * 1024 - case "gb", "gbyte", "gigabyte": - value *= 1024 * 1024 * 1024 - case "tb", "tbyte", "terabyte": - value *= 1024 * 1024 * 1024 * 1024 - case "pb", "pbyte", "petabyte": - value *= 1024 * 1024 * 1024 * 1024 * 1024 - default: - return 0, "", false - } - - if toUnit == "minimum" || toUnit == "auto" { - switch { - case value < 1024: - toUnit = "b" - case value < 1024*1024: - toUnit = "kb" - case value < 1024*1024*1024: - toUnit = "mb" - case value < 1024*1024*1024*1024: - toUnit = "gb" - case value < 1024*1024*1024*1024*1024: - toUnit = "tb" - default: - toUnit = "pb" +// findByAlias returns the unit associated with the specified alias. It returns +// nil if the unit with such an alias is not found. +func (ut unitType) findByAlias(alias string) *unit { + for _, u := range ut.units { + for _, a := range u.aliases { + if alias == a { + return &u + } } } - - var output float64 - switch toUnit { - default: - output, toUnit = float64(value), "B" - case "kb", "kbyte", "kilobyte": - output, toUnit = float64(value)/1024, "kB" - case "mb", "mbyte", "megabyte": - output, toUnit = float64(value)/(1024*1024), "MB" - case "gb", "gbyte", "gigabyte": - output, toUnit = float64(value)/(1024*1024*1024), "GB" - case "tb", "tbyte", "terabyte": - output, toUnit = float64(value)/(1024*1024*1024*1024), "TB" - case "pb", "pbyte", "petabyte": - output, toUnit = float64(value)/(1024*1024*1024*1024*1024), "PB" - } - return output, toUnit, true + return nil } -// isTimeUnit returns whether a name is recognized as a time unit. -func isTimeUnit(unit string) bool { +// sniffUnit simplifies the input alias and returns the unit associated with the +// specified alias. It returns nil if the unit with such an alias is not found.
+func (ut unitType) sniffUnit(unit string) *unit { unit = strings.ToLower(unit) if len(unit) > 2 { unit = strings.TrimSuffix(unit, "s") } - - switch unit { - case "nanosecond", "ns", "microsecond", "millisecond", "ms", "s", "second", "sec", "hr", "day", "week", "year": - return true - } - return false + return ut.findByAlias(unit) } -func timeLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) { - fromUnit = strings.ToLower(fromUnit) - if len(fromUnit) > 2 { - fromUnit = strings.TrimSuffix(fromUnit, "s") - } - - toUnit = strings.ToLower(toUnit) - if len(toUnit) > 2 { - toUnit = strings.TrimSuffix(toUnit, "s") - } - - var d time.Duration - switch fromUnit { - case "nanosecond", "ns": - d = time.Duration(value) * time.Nanosecond - case "microsecond": - d = time.Duration(value) * time.Microsecond - case "millisecond", "ms": - d = time.Duration(value) * time.Millisecond - case "second", "sec", "s": - d = time.Duration(value) * time.Second - case "cycle": - return float64(value), "", true - default: - return 0, "", false - } - - if toUnit == "minimum" || toUnit == "auto" { - switch { - case d < 1*time.Microsecond: - toUnit = "ns" - case d < 1*time.Millisecond: - toUnit = "us" - case d < 1*time.Second: - toUnit = "ms" - case d < 1*time.Minute: - toUnit = "sec" - case d < 1*time.Hour: - toUnit = "min" - case d < 24*time.Hour: - toUnit = "hour" - case d < 15*24*time.Hour: - toUnit = "day" - case d < 120*24*time.Hour: - toUnit = "week" - default: - toUnit = "year" +// autoScale takes in the value with units of the base unit and returns +// that value scaled to a reasonable unit if a reasonable unit is +// found. +func (ut unitType) autoScale(value float64) (float64, string, bool) { + var f float64 + var unit string + for _, u := range ut.units { + if u.factor >= f && (value/u.factor) >= 1.0 { + f = u.factor + unit = u.canonicalName } } - - var output float64 - dd := float64(d) - switch toUnit { - case "ns", "nanosecond": - output, toUnit = dd/float64(time.Nanosecond), "ns" - case "us", "microsecond": - output, toUnit = dd/float64(time.Microsecond), "us" - case "ms", "millisecond": - output, toUnit = dd/float64(time.Millisecond), "ms" - case "min", "minute": - output, toUnit = dd/float64(time.Minute), "mins" - case "hour", "hr": - output, toUnit = dd/float64(time.Hour), "hrs" - case "day": - output, toUnit = dd/float64(24*time.Hour), "days" - case "week", "wk": - output, toUnit = dd/float64(7*24*time.Hour), "wks" - case "year", "yr": - output, toUnit = dd/float64(365*24*time.Hour), "yrs" - default: - // "sec", "second", "s" handled by default case. - output, toUnit = dd/float64(time.Second), "s" + if f == 0 { + return 0, "", false } - return output, toUnit, true + return value / f, unit, true +} + +// convertUnit converts a value from the fromUnit to the toUnit, autoscaling +// the value if the toUnit is "minimum" or "auto". If the fromUnit is not +// included in the unitType, then a false boolean will be returned. If the +// toUnit is not in the unitType, the value will be returned in terms of the +// default unitType. 
+func (ut unitType) convertUnit(value int64, fromUnitStr, toUnitStr string) (float64, string, bool) { + fromUnit := ut.sniffUnit(fromUnitStr) + if fromUnit == nil { + return 0, "", false + } + v := float64(value) * fromUnit.factor + if toUnitStr == "minimum" || toUnitStr == "auto" { + if v, u, ok := ut.autoScale(v); ok { + return v, u, true + } + return v / ut.defaultUnit.factor, ut.defaultUnit.canonicalName, true + } + toUnit := ut.sniffUnit(toUnitStr) + if toUnit == nil { + return v / ut.defaultUnit.factor, ut.defaultUnit.canonicalName, true + } + return v / toUnit.factor, toUnit.canonicalName, true +} + +var memoryUnits = unitType{ + units: []unit{ + {"B", []string{"b", "byte"}, 1}, + {"kB", []string{"kb", "kbyte", "kilobyte"}, float64(1 << 10)}, + {"MB", []string{"mb", "mbyte", "megabyte"}, float64(1 << 20)}, + {"GB", []string{"gb", "gbyte", "gigabyte"}, float64(1 << 30)}, + {"TB", []string{"tb", "tbyte", "terabyte"}, float64(1 << 40)}, + {"PB", []string{"pb", "pbyte", "petabyte"}, float64(1 << 50)}, + }, + defaultUnit: unit{"B", []string{"b", "byte"}, 1}, +} + +var timeUnits = unitType{ + units: []unit{ + {"ns", []string{"ns", "nanosecond"}, float64(time.Nanosecond)}, + {"us", []string{"μs", "us", "microsecond"}, float64(time.Microsecond)}, + {"ms", []string{"ms", "millisecond"}, float64(time.Millisecond)}, + {"s", []string{"s", "sec", "second"}, float64(time.Second)}, + {"hrs", []string{"hour", "hr"}, float64(time.Hour)}, + }, + defaultUnit: unit{"s", []string{}, float64(time.Second)}, +} + +var gcuUnits = unitType{ + units: []unit{ + {"n*GCU", []string{"nanogcu"}, 1e-9}, + {"u*GCU", []string{"microgcu"}, 1e-6}, + {"m*GCU", []string{"milligcu"}, 1e-3}, + {"GCU", []string{"gcu"}, 1}, + {"k*GCU", []string{"kilogcu"}, 1e3}, + {"M*GCU", []string{"megagcu"}, 1e6}, + {"G*GCU", []string{"gigagcu"}, 1e9}, + {"T*GCU", []string{"teragcu"}, 1e12}, + {"P*GCU", []string{"petagcu"}, 1e15}, + }, + defaultUnit: unit{"GCU", []string{}, 1.0}, } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go index 3a8d0af7305..a57a0b20a96 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go @@ -131,8 +131,9 @@ type ObjFile interface { // Name returns the underlying file name, if available Name() string - // Base returns the base address to use when looking up symbols in the file. - Base() uint64 + // ObjAddr returns the objdump (linker) address corresponding to a runtime + // address, and an error. + ObjAddr(addr uint64) (uint64, error) // BuildID returns the GNU build ID of the file, or an empty string.
BuildID() string diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go index bc5685d61e1..4a865548801 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go @@ -445,7 +445,7 @@ func PrintAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFuncs int) e return err } - ns := annotateAssembly(insts, sns, s.base) + ns := annotateAssembly(insts, sns, s.file) fmt.Fprintf(w, "ROUTINE ======================== %s\n", s.sym.Name[0]) for _, name := range s.sym.Name[1:] { @@ -534,7 +534,6 @@ func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regex addr = *address } msyms, err := f.Symbols(rx, addr) - base := f.Base() f.Close() if err != nil { continue @@ -543,7 +542,6 @@ func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regex objSyms = append(objSyms, &objSymbol{ sym: ms, - base: base, file: f, }, ) @@ -558,7 +556,6 @@ func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regex // added to correspond to sample addresses type objSymbol struct { sym *plugin.Sym - base uint64 file plugin.ObjFile } @@ -578,8 +575,7 @@ func nodesPerSymbol(ns graph.Nodes, symbols []*objSymbol) map[*objSymbol]graph.N for _, s := range symbols { // Gather samples for this symbol. for _, n := range ns { - address := n.Info.Address - s.base - if address >= s.sym.Start && address < s.sym.End { + if address, err := s.file.ObjAddr(n.Info.Address); err == nil && address >= s.sym.Start && address < s.sym.End { symNodes[s] = append(symNodes[s], n) } } @@ -621,7 +617,7 @@ func (a *assemblyInstruction) cumValue() int64 { // annotateAssembly annotates a set of assembly instructions with a // set of samples. It returns a set of nodes to display. base is an // offset to adjust the sample addresses. -func annotateAssembly(insts []plugin.Inst, samples graph.Nodes, base uint64) []assemblyInstruction { +func annotateAssembly(insts []plugin.Inst, samples graph.Nodes, file plugin.ObjFile) []assemblyInstruction { // Add end marker to simplify printing loop. insts = append(insts, plugin.Inst{ Addr: ^uint64(0), @@ -645,7 +641,10 @@ func annotateAssembly(insts []plugin.Inst, samples graph.Nodes, base uint64) []a // Sum all the samples until the next instruction (to account // for samples attributed to the middle of an instruction). 
- for next := insts[ix+1].Addr; s < len(samples) && samples[s].Info.Address-base < next; s++ { + for next := insts[ix+1].Addr; s < len(samples); s++ { + if addr, err := file.ObjAddr(samples[s].Info.Address); err != nil || addr >= next { + break + } sample := samples[s] n.flatDiv += sample.FlatDiv n.flat += sample.Flat diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go index b4805354390..54245e5f9ea 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go @@ -24,12 +24,15 @@ import ( "io" "os" "path/filepath" + "regexp" + "sort" "strconv" "strings" "github.com/google/pprof/internal/graph" "github.com/google/pprof/internal/measurement" "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" ) // printSource prints an annotated source listing, include all @@ -126,19 +129,75 @@ func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error { return nil } +// sourcePrinter holds state needed for generating source+asm HTML listing. +type sourcePrinter struct { + reader *sourceReader + synth *synthCode + objectTool plugin.ObjTool + objects map[string]plugin.ObjFile // Opened object files + sym *regexp.Regexp // May be nil + files map[string]*sourceFile // Set of files to print. + insts map[uint64]instructionInfo // Instructions of interest (keyed by address). + + // Set of function names that we are interested in (because they had + // a sample and match sym). + interest map[string]bool + + // Mapping from system function names to printable names. + prettyNames map[string]string +} + +// addrInfo holds information for an address we are interested in. +type addrInfo struct { + loc *profile.Location // Always non-nil + obj plugin.ObjFile // May be nil +} + +// instructionInfo holds collected information for an instruction. +type instructionInfo struct { + objAddr uint64 // Address in object file (with base subtracted out) + length int // Instruction length in bytes + disasm string // Disassembly of instruction + file string // For top-level function in which instruction occurs + line int // For top-level function in which instruction occurs + flat, cum int64 // Samples to report (divisor already applied) +} + +// sourceFile contains collected information for files we will print. +type sourceFile struct { + fname string + cum int64 + flat int64 + lines map[int][]sourceInst // Instructions to show per line + funcName map[int]string // Function name per line +} + +// sourceInst holds information for an instruction to be displayed. +type sourceInst struct { + addr uint64 + stack []callID // Inlined call-stack +} + +// sourceFunction contains information for a contiguous range of lines per function we +// will print. +type sourceFunction struct { + name string + begin, end int // Line numbers (end is not included in the range) + flat, cum int64 +} + +// addressRange is a range of addresses plus the object file that contains it. +type addressRange struct { + begin, end uint64 + obj plugin.ObjFile + mapping *profile.Mapping + score int64 // Used to order ranges for processing +} + // PrintWebList prints annotated source listing of rpt to w. +// rpt.prof should contain inlined call info. 
func PrintWebList(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFiles int) error { - o := rpt.options - g := rpt.newGraph(nil) - - // If the regexp source can be parsed as an address, also match - // functions that land on that address. - var address *uint64 - if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil { - address = &hex - } - - sourcePath := o.SourcePath + sourcePath := rpt.options.SourcePath if sourcePath == "" { wd, err := os.Getwd() if err != nil { @@ -146,171 +205,544 @@ func PrintWebList(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFiles int) er } sourcePath = wd } - reader := newSourceReader(sourcePath, o.TrimPath) + sp := newSourcePrinter(rpt, obj, sourcePath) + sp.print(w, maxFiles, rpt) + sp.close() + return nil +} - type fileFunction struct { - fileName, functionName string +func newSourcePrinter(rpt *Report, obj plugin.ObjTool, sourcePath string) *sourcePrinter { + sp := &sourcePrinter{ + reader: newSourceReader(sourcePath, rpt.options.TrimPath), + synth: newSynthCode(rpt.prof.Mapping), + objectTool: obj, + objects: map[string]plugin.ObjFile{}, + sym: rpt.options.Symbol, + files: map[string]*sourceFile{}, + insts: map[uint64]instructionInfo{}, + prettyNames: map[string]string{}, + interest: map[string]bool{}, } - // Extract interesting symbols from binary files in the profile and - // classify samples per symbol. - symbols := symbolsFromBinaries(rpt.prof, g, o.Symbol, address, obj) - symNodes := nodesPerSymbol(g.Nodes, symbols) + // If the regexp source can be parsed as an address, also match + // functions that land on that address. + var address *uint64 + if sp.sym != nil { + if hex, err := strconv.ParseUint(sp.sym.String(), 0, 64); err == nil { + address = &hex + } + } - // Identify sources associated to a symbol by examining - // symbol samples. Classify samples per source file. - fileNodes := make(map[fileFunction]graph.Nodes) - if len(symNodes) == 0 { - for _, n := range g.Nodes { - if n.Info.File == "" || !o.Symbol.MatchString(n.Info.Name) { + addrs := map[uint64]addrInfo{} + flat := map[uint64]int64{} + cum := map[uint64]int64{} + + // Record an interest in the function corresponding to lines[index]. + markInterest := func(addr uint64, loc *profile.Location, index int) { + fn := loc.Line[index] + if fn.Function == nil { + return + } + sp.interest[fn.Function.Name] = true + sp.interest[fn.Function.SystemName] = true + if _, ok := addrs[addr]; !ok { + addrs[addr] = addrInfo{loc, sp.objectFile(loc.Mapping)} + } + } + + // See if sp.sym matches line. + matches := func(line profile.Line) bool { + if line.Function == nil { + return false + } + return sp.sym.MatchString(line.Function.Name) || + sp.sym.MatchString(line.Function.SystemName) || + sp.sym.MatchString(line.Function.Filename) + } + + // Extract sample counts and compute set of interesting functions. + for _, sample := range rpt.prof.Sample { + value := rpt.options.SampleValue(sample.Value) + if rpt.options.SampleMeanDivisor != nil { + div := rpt.options.SampleMeanDivisor(sample.Value) + if div != 0 { + value /= div + } + } + + // Find call-sites matching sym. + for i := len(sample.Location) - 1; i >= 0; i-- { + loc := sample.Location[i] + for _, line := range loc.Line { + if line.Function == nil { + continue + } + sp.prettyNames[line.Function.SystemName] = line.Function.Name + } + + addr := loc.Address + if addr == 0 { + // Some profiles are missing valid addresses. 
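The sp.synth.address call that follows relies on the synthCode helper introduced elsewhere in this patch; its definition is not shown here, but judging from its use it hands out stable fake addresses for locations that lack one, so such locations can still key the flat/cum maps. A toy version under that assumption (simplified location type, made-up starting point):

    package main

    import "fmt"

    type location struct{ id uint64 } // stand-in for *profile.Location

    // synth hands out stable fake addresses, presumably starting above any
    // real mapping so fake and real addresses cannot collide.
    type synth struct {
        next  uint64
        addrs map[*location]uint64
    }

    func newSynth(maxMappingLimit uint64) *synth {
        return &synth{next: maxMappingLimit + 1, addrs: map[*location]uint64{}}
    }

    func (s *synth) address(loc *location) uint64 {
        if a, ok := s.addrs[loc]; ok {
            return a // Same location always gets the same fake address.
        }
        a := s.next
        s.next++
        s.addrs[loc] = a
        return a
    }

    func main() {
        s := newSynth(0x7fffffff)
        l1, l2 := &location{1}, &location{2}
        fmt.Printf("%#x %#x %#x\n", s.address(l1), s.address(l2), s.address(l1))
    }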
+ addr = sp.synth.address(loc) + } + + cum[addr] += value + if i == 0 { + flat[addr] += value + } + + if sp.sym == nil || (address != nil && addr == *address) { + // Interested in top-level entry of stack. + if len(loc.Line) > 0 { + markInterest(addr, loc, len(loc.Line)-1) + } continue } - ff := fileFunction{n.Info.File, n.Info.Name} - fileNodes[ff] = append(fileNodes[ff], n) - } - } else { - for _, nodes := range symNodes { - for _, n := range nodes { - if n.Info.File != "" { - ff := fileFunction{n.Info.File, n.Info.Name} - fileNodes[ff] = append(fileNodes[ff], n) + + // Search in inlined stack for a match. + matchFile := (loc.Mapping != nil && sp.sym.MatchString(loc.Mapping.File)) + for j, line := range loc.Line { + if (j == 0 && matchFile) || matches(line) { + markInterest(addr, loc, j) + } } } } - if len(fileNodes) == 0 { - return fmt.Errorf("no source information for %s", o.Symbol.String()) - } + sp.expandAddresses(rpt, addrs, flat) + sp.initSamples(flat, cum) + return sp +} - sourceFiles := make(graph.Nodes, 0, len(fileNodes)) - for _, nodes := range fileNodes { - sNode := *nodes[0] - sNode.Flat, sNode.Cum = nodes.Sum() - sourceFiles = append(sourceFiles, &sNode) - } - - // Limit number of files printed? - if maxFiles < 0 { - sourceFiles.Sort(graph.FileOrder) - } else { - sourceFiles.Sort(graph.FlatNameOrder) - if maxFiles < len(sourceFiles) { - sourceFiles = sourceFiles[:maxFiles] +func (sp *sourcePrinter) close() { + for _, objFile := range sp.objects { + if objFile != nil { + objFile.Close() } } +} - // Print each file associated with this function. - for _, n := range sourceFiles { - ff := fileFunction{n.Info.File, n.Info.Name} - fns := fileNodes[ff] +func (sp *sourcePrinter) expandAddresses(rpt *Report, addrs map[uint64]addrInfo, flat map[uint64]int64) { + // We found interesting addresses (ones with non-zero samples) above. + // Get covering address ranges and disassemble the ranges. + ranges, unprocessed := sp.splitIntoRanges(rpt.prof, addrs, flat) + sp.handleUnprocessed(addrs, unprocessed) - asm := assemblyPerSourceLine(symbols, fns, ff.fileName, obj, o.IntelSyntax) - start, end := sourceCoordinates(asm) + // Trim ranges if there are too many. + const maxRanges = 25 + sort.Slice(ranges, func(i, j int) bool { + return ranges[i].score > ranges[j].score + }) + if len(ranges) > maxRanges { + ranges = ranges[:maxRanges] + } - fnodes, path, err := getSourceFromFile(ff.fileName, reader, fns, start, end) + for _, r := range ranges { + objBegin, err := r.obj.ObjAddr(r.begin) if err != nil { - fnodes, path = getMissingFunctionSource(ff.fileName, asm, start, end) + fmt.Fprintf(os.Stderr, "Failed to compute objdump address for range start %x: %v\n", r.begin, err) + continue + } + objEnd, err := r.obj.ObjAddr(r.end) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to compute objdump address for range end %x: %v\n", r.end, err) + continue + } + base := r.begin - objBegin + insts, err := sp.objectTool.Disasm(r.mapping.File, objBegin, objEnd, rpt.options.IntelSyntax) + if err != nil { + // TODO(sanjay): Report that the covered addresses are missing. + continue } - printFunctionHeader(w, ff.functionName, path, n.Flat, n.Cum, rpt) - for _, fn := range fnodes { - printFunctionSourceLine(w, fn, asm[fn.Info.Lineno], reader, rpt) + var lastFrames []plugin.Frame + var lastAddr, maxAddr uint64 + for i, inst := range insts { + addr := inst.Addr + base + + // Guard against duplicate output from Disasm.
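One detail worth spelling out in the loop above: Disasm is invoked with object-file addresses (objBegin, objEnd) and reports instruction addresses in the same space, while samples are keyed by runtime addresses, so every instruction is shifted by base = r.begin - objBegin before any lookups. A self-contained sketch of that round trip, with made-up addresses:

    package main

    import "fmt"

    type inst struct{ addr uint64 } // object-file address, as Disasm reports it

    func main() {
        runtimeBegin := uint64(0x401000) // r.begin: where the range lives at runtime
        objBegin := uint64(0x1000)       // r.obj.ObjAddr(r.begin)
        base := runtimeBegin - objBegin  // 0x400000 for this mapping

        // Disassembled instructions come back in object-file space...
        insts := []inst{{0x1000}, {0x1004}, {0x1008}}

        // ...and are rebased to runtime space so they can be matched
        // against sample addresses.
        for _, in := range insts {
            fmt.Printf("object %#x -> runtime %#x\n", in.addr, in.addr+base)
        }
    }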
+ if addr <= maxAddr { + continue + } + maxAddr = addr + + length := 1 + if i+1 < len(insts) && insts[i+1].Addr > inst.Addr { + // Extend to next instruction. + length = int(insts[i+1].Addr - inst.Addr) + } + + // Get inlined-call-stack for address. + frames, err := r.obj.SourceLine(addr) + if err != nil { + // Construct a frame from disassembler output. + frames = []plugin.Frame{{Func: inst.Function, File: inst.File, Line: inst.Line}} + } + + x := instructionInfo{objAddr: inst.Addr, length: length, disasm: inst.Text} + if len(frames) > 0 { + // We could consider using the outer-most caller's source + // location so we give some hint as to where the + // inlining happened that led to this instruction. So for + // example, suppose we have the following (inlined) call + // chains for this instruction: + // F1->G->H + // F2->G->H + // We could tag the instructions from the first call with + // F1 and instructions from the second call with F2. But + // that leads to a somewhat confusing display. So for now, + // we stick with just the inner-most location (i.e., H). + // In the future we will consider changing the display to + // make caller info more visible. + index := 0 // Inner-most frame + x.file = frames[index].File + x.line = frames[index].Line + } + sp.insts[addr] = x + + // We sometimes get instructions with a zero reported line number. + // Make such instructions have the same line info as the preceding + // instruction, if an earlier instruction is found close enough. + const neighborhood = 32 + if len(frames) > 0 && frames[0].Line != 0 { + lastFrames = frames + lastAddr = addr + } else if (addr-lastAddr <= neighborhood) && lastFrames != nil { + frames = lastFrames + } + + sp.addStack(addr, frames) + } + } +} + +func (sp *sourcePrinter) addStack(addr uint64, frames []plugin.Frame) { + // See if the stack contains a function we are interested in. + for i, f := range frames { + if !sp.interest[f.Func] { + continue + } + + // Record sub-stack under frame's file/line. + fname := canonicalizeFileName(f.File) + file := sp.files[fname] + if file == nil { + file = &sourceFile{ + fname: fname, + lines: map[int][]sourceInst{}, + funcName: map[int]string{}, + } + sp.files[fname] = file + } + callees := frames[:i] + stack := make([]callID, 0, len(callees)) + for j := len(callees) - 1; j >= 0; j-- { // Reverse so caller is first + stack = append(stack, callID{ + file: callees[j].File, + line: callees[j].Line, + }) + } + file.lines[f.Line] = append(file.lines[f.Line], sourceInst{addr, stack}) + + // Remember the first function name encountered per source line + // and assume that that line belongs to that function. + if _, ok := file.funcName[f.Line]; !ok { + file.funcName[f.Line] = f.Func + } + } +} + +// synthAsm is the special disassembler value used for instructions without an object file. +const synthAsm = "" + +// handleUnprocessed handles addresses that were skipped by splitIntoRanges because they +// did not belong to a known object file. +func (sp *sourcePrinter) handleUnprocessed(addrs map[uint64]addrInfo, unprocessed []uint64) { + // makeFrames synthesizes a []plugin.Frame list for the specified address. + // The result will typically have length 1, but may be longer if address corresponds + // to inlined calls.
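The neighborhood constant above implements a small heuristic: an instruction whose debug info reports line 0 borrows the frames of the nearest preceding instruction, provided it lies within 32 bytes. The same rule in isolation, with a simplified frame type standing in for plugin.Frame:

    package main

    import "fmt"

    const neighborhood = 32

    type frame struct {
        fn   string
        line int
    }

    // fixZeroLines gives line-0 instructions the frames of the nearest
    // preceding instruction, provided it is within neighborhood bytes.
    // addrs must be sorted in increasing order.
    func fixZeroLines(addrs []uint64, frames map[uint64][]frame) {
        var last []frame
        var lastAddr uint64
        for _, addr := range addrs {
            f := frames[addr]
            if len(f) > 0 && f[0].line != 0 {
                last, lastAddr = f, addr
            } else if addr-lastAddr <= neighborhood && last != nil {
                frames[addr] = last
            }
        }
    }

    func main() {
        frames := map[uint64][]frame{
            0x10: {{"f", 12}},
            0x14: {{"f", 0}}, // close by: inherits line 12
            0x80: {{"f", 0}}, // too far: left alone
        }
        fixZeroLines([]uint64{0x10, 0x14, 0x80}, frames)
        fmt.Println(frames[0x14], frames[0x80])
    }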
+ makeFrames := func(addr uint64) []plugin.Frame { + loc := addrs[addr].loc + stack := make([]plugin.Frame, 0, len(loc.Line)) + for _, line := range loc.Line { + fn := line.Function + if fn == nil { + continue + } + stack = append(stack, plugin.Frame{ + Func: fn.Name, + File: fn.Filename, + Line: int(line.Line), + }) + } + return stack + } + + for _, addr := range unprocessed { + frames := makeFrames(addr) + x := instructionInfo{ + objAddr: addr, + length: 1, + disasm: synthAsm, + } + if len(frames) > 0 { + x.file = frames[0].File + x.line = frames[0].Line + } + sp.insts[addr] = x + + sp.addStack(addr, frames) + } +} + +// splitIntoRanges converts the set of addresses we are interested in into a set of address +// ranges to disassemble. It also returns the set of addresses found that did not have an +// associated object file and were therefore not added to an address range. +func (sp *sourcePrinter) splitIntoRanges(prof *profile.Profile, addrMap map[uint64]addrInfo, flat map[uint64]int64) ([]addressRange, []uint64) { + // Partition addresses into two sets: ones with a known object file, and ones without. + var addrs, unprocessed []uint64 + for addr, info := range addrMap { + if info.obj != nil { + addrs = append(addrs, addr) + } else { + unprocessed = append(unprocessed, addr) + } + } + sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] }) + + const expand = 500 // How much to expand range to pick up nearby addresses. + var result []addressRange + for i, n := 0, len(addrs); i < n; { + begin, end := addrs[i], addrs[i] + sum := flat[begin] + i++ + + info := addrMap[begin] + m := info.loc.Mapping + obj := info.obj // Non-nil because of the partitioning done above. + + // Find following addresses that are close enough to addrs[i]. + for i < n && addrs[i] <= end+2*expand && addrs[i] < m.Limit { + // When we expand ranges by "expand" on either side, the ranges + // for addrs[i] and addrs[i-1] will merge. + end = addrs[i] + sum += flat[end] + i++ + } + if begin-m.Start >= expand { + begin -= expand + } else { + begin = m.Start + } + if m.Limit-end >= expand { + end += expand + } else { + end = m.Limit + } + + result = append(result, addressRange{begin, end, obj, m, sum}) + } + return result, unprocessed +} + +func (sp *sourcePrinter) initSamples(flat, cum map[uint64]int64) { + for addr, inst := range sp.insts { + // Move all samples that were assigned to the middle of an instruction to the + // beginning of that instruction. This takes care of samples that were recorded + // against pc+1. + instEnd := addr + uint64(inst.length) + for p := addr; p < instEnd; p++ { + inst.flat += flat[p] + inst.cum += cum[p] + } + sp.insts[addr] = inst + } +} + +func (sp *sourcePrinter) print(w io.Writer, maxFiles int, rpt *Report) { + // Finalize per-file counts. + for _, file := range sp.files { + seen := map[uint64]bool{} + for _, line := range file.lines { + for _, x := range line { + if seen[x.addr] { + // Same address can be displayed multiple times in a file + // (e.g., if we show multiple inlined functions). + // Avoid double-counting samples in this case. + continue + } + seen[x.addr] = true + inst := sp.insts[x.addr] + file.cum += inst.cum + file.flat += inst.flat + } + } + } + + // Get sorted list of files to print. + var files []*sourceFile + for _, f := range sp.files { + files = append(files, f) + } + order := func(i, j int) bool { return files[i].flat > files[j].flat } + if maxFiles < 0 { + // Order by name for compatibility with old code.
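splitIntoRanges is what keeps disassembly affordable: sample addresses within 2*expand of each other coalesce into one range, which is then widened by expand on each side and clamped to the mapping bounds. A single-mapping simplification of that rule (scores and object files omitted):

    package main

    import "fmt"

    // ranges coalesces nearby sorted addresses into disassembly ranges,
    // widening each range by expand on both sides, clamped to [start, limit).
    func ranges(addrs []uint64, start, limit uint64) [][2]uint64 {
        const expand = 500
        var out [][2]uint64
        for i := 0; i < len(addrs); {
            begin, end := addrs[i], addrs[i]
            i++
            for i < len(addrs) && addrs[i] <= end+2*expand && addrs[i] < limit {
                end = addrs[i]
                i++
            }
            if begin-start >= expand {
                begin -= expand
            } else {
                begin = start
            }
            if limit-end >= expand {
                end += expand
            } else {
                end = limit
            }
            out = append(out, [2]uint64{begin, end})
        }
        return out
    }

    func main() {
        // 0x1100 and 0x1400 merge (gap 0x300 <= 2*expand); 0x9000 starts a new range.
        fmt.Println(ranges([]uint64{0x1100, 0x1400, 0x9000}, 0x1000, 0xa000))
    }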
+ order = func(i, j int) bool { return files[i].fname < files[j].fname } + maxFiles = len(files) + } + sort.Slice(files, order) + for i, f := range files { + if i < maxFiles { + sp.printFile(w, f, rpt) + } + } +} + +func (sp *sourcePrinter) printFile(w io.Writer, f *sourceFile, rpt *Report) { + for _, fn := range sp.functions(f) { + if fn.cum == 0 { + continue + } + printFunctionHeader(w, fn.name, f.fname, fn.flat, fn.cum, rpt) + var asm []assemblyInstruction + for l := fn.begin; l < fn.end; l++ { + lineContents, ok := sp.reader.line(f.fname, l) + if !ok { + if len(f.lines[l]) == 0 { + // Outside of range of valid lines and nothing to print. + continue + } + if l == 0 { + // Line number 0 shows up if line number is not known. + lineContents = "" + } else { + // Past end of file, but have data to print. + lineContents = "???" + } + } + + // Make list of assembly instructions. + asm = asm[:0] + var flatSum, cumSum int64 + var lastAddr uint64 + for _, inst := range f.lines[l] { + addr := inst.addr + x := sp.insts[addr] + flatSum += x.flat + cumSum += x.cum + startsBlock := (addr != lastAddr+uint64(sp.insts[lastAddr].length)) + lastAddr = addr + + // divisors already applied, so leave flatDiv,cumDiv as 0 + asm = append(asm, assemblyInstruction{ + address: x.objAddr, + instruction: x.disasm, + function: fn.name, + file: x.file, + line: x.line, + flat: x.flat, + cum: x.cum, + startsBlock: startsBlock, + inlineCalls: inst.stack, + }) + } + + printFunctionSourceLine(w, l, flatSum, cumSum, lineContents, asm, sp.reader, rpt) } printFunctionClosing(w) } - return nil } -// sourceCoordinates returns the lowest and highest line numbers from -// a set of assembly statements. -func sourceCoordinates(asm map[int][]assemblyInstruction) (start, end int) { - for l := range asm { - if start == 0 || l < start { - start = l - } - if end == 0 || l > end { - end = l - } +// functions splits apart the lines to show in a file into a list of per-function ranges. +func (sp *sourcePrinter) functions(f *sourceFile) []sourceFunction { + var funcs []sourceFunction + + // Get interesting lines in sorted order. + lines := make([]int, 0, len(f.lines)) + for l := range f.lines { + lines = append(lines, l) } - return start, end + sort.Ints(lines) + + // Merge adjacent lines that are in same function and not too far apart. + const mergeLimit = 20 + for _, l := range lines { + name := f.funcName[l] + if pretty, ok := sp.prettyNames[name]; ok { + // Use demangled name if available. + name = pretty + } + + fn := sourceFunction{name: name, begin: l, end: l + 1} + for _, x := range f.lines[l] { + inst := sp.insts[x.addr] + fn.flat += inst.flat + fn.cum += inst.cum + } + + // See if we should merge into preceding function. + if len(funcs) > 0 { + last := funcs[len(funcs)-1] + if l-last.end < mergeLimit && last.name == name { + last.end = l + 1 + last.flat += fn.flat + last.cum += fn.cum + funcs[len(funcs)-1] = last + continue + } + } + + // Add new function. + funcs = append(funcs, fn) + } + + // Expand function boundaries to show neighborhood. + const expand = 5 + for i, f := range funcs { + if i == 0 { + // Extend backwards, stopping at line number 1, but do not disturb 0 + // since that is a special line number that can show up when addr2line + // cannot determine the real line number. + if f.begin > expand { + f.begin -= expand + } else if f.begin > 1 { + f.begin = 1 + } + } else { + // Find gap from predecessor and divide between predecessor and f. 
+ halfGap := (f.begin - funcs[i-1].end) / 2 + if halfGap > expand { + halfGap = expand + } + funcs[i-1].end += halfGap + f.begin -= halfGap + } + funcs[i] = f + } + + // Also extend the ending point of the last function. + if len(funcs) > 0 { + funcs[len(funcs)-1].end += expand + } + + return funcs } -// assemblyPerSourceLine disassembles the binary containing a symbol -// and classifies the assembly instructions according to its -// corresponding source line, annotating them with a set of samples. -func assemblyPerSourceLine(objSyms []*objSymbol, rs graph.Nodes, src string, obj plugin.ObjTool, intelSyntax bool) map[int][]assemblyInstruction { - assembly := make(map[int][]assemblyInstruction) - // Identify symbol to use for this collection of samples. - o := findMatchingSymbol(objSyms, rs) - if o == nil { - return assembly +// objectFile returns the object for the specified mapping, opening it if necessary. +// It returns nil on error. +func (sp *sourcePrinter) objectFile(m *profile.Mapping) plugin.ObjFile { + if m == nil { + return nil } - - // Extract assembly for matched symbol - insts, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End, intelSyntax) + if object, ok := sp.objects[m.File]; ok { + return object // May be nil if we detected an error earlier. + } + object, err := sp.objectTool.Open(m.File, m.Start, m.Limit, m.Offset) if err != nil { - return assembly + object = nil } - - srcBase := filepath.Base(src) - anodes := annotateAssembly(insts, rs, o.base) - var lineno = 0 - var prevline = 0 - for _, an := range anodes { - // Do not rely solely on the line number produced by Disasm - // since it is not what we want in the presence of inlining. - // - // E.g., suppose we are printing source code for F and this - // instruction is from H where F called G called H and both - // of those calls were inlined. We want to use the line - // number from F, not from H (which is what Disasm gives us). - // - // So find the outer-most linenumber in the source file. - found := false - if frames, err := o.file.SourceLine(an.address + o.base); err == nil { - for i := len(frames) - 1; i >= 0; i-- { - if filepath.Base(frames[i].File) == srcBase { - for j := i - 1; j >= 0; j-- { - an.inlineCalls = append(an.inlineCalls, callID{frames[j].File, frames[j].Line}) - } - lineno = frames[i].Line - found = true - break - } - } - } - if !found && filepath.Base(an.file) == srcBase { - lineno = an.line - } - - if lineno != 0 { - if lineno != prevline { - // This instruction starts a new block - // of contiguous instructions on this line. - an.startsBlock = true - } - prevline = lineno - assembly[lineno] = append(assembly[lineno], an) - } - } - - return assembly -} - -// findMatchingSymbol looks for the symbol that corresponds to a set -// of samples, by comparing their addresses. -func findMatchingSymbol(objSyms []*objSymbol, ns graph.Nodes) *objSymbol { - for _, n := range ns { - for _, o := range objSyms { - if filepath.Base(o.sym.File) == filepath.Base(n.Info.Objfile) && - o.sym.Start <= n.Info.Address-o.base && - n.Info.Address-o.base <= o.sym.End { - return o - } - } - } - return nil + sp.objects[m.File] = object // Cache even on error. + return object } // printHeader prints the page header for a weblist report. @@ -348,22 +780,39 @@ func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64, } // printFunctionSourceLine prints a source line and the corresponding assembly.
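objectFile above is a straightforward memoization: each mapping file is opened at most once, and a failed open is cached as nil so later samples hitting the same mapping do not retry it. The same pattern in isolation, with hypothetical names:

    package main

    import (
        "errors"
        "fmt"
    )

    type obj struct{ name string }

    // open is a stand-in for plugin.ObjTool.Open.
    func open(file string) (*obj, error) {
        if file == "bad.so" {
            return nil, errors.New("no such object")
        }
        return &obj{file}, nil
    }

    // opener caches opened objects per file; a nil value records a past failure.
    type opener struct {
        cache map[string]*obj
    }

    func (o *opener) get(file string) *obj {
        if cached, ok := o.cache[file]; ok {
            return cached // May be nil if an earlier open failed.
        }
        object, err := open(file)
        if err != nil {
            object = nil
        }
        o.cache[file] = object // Cache even on error.
        return object
    }

    func main() {
        o := &opener{cache: map[string]*obj{}}
        fmt.Println(o.get("libc.so"), o.get("bad.so"), o.get("bad.so"))
    }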
-func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyInstruction, reader *sourceReader, rpt *Report) { +func printFunctionSourceLine(w io.Writer, lineNo int, flat, cum int64, lineContents string, + assembly []assemblyInstruction, reader *sourceReader, rpt *Report) { if len(assembly) == 0 { fmt.Fprintf(w, " %6d %10s %10s %8s %s \n", - fn.Info.Lineno, - valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt), - "", template.HTMLEscapeString(fn.Info.Name)) + lineNo, + valueOrDot(flat, rpt), valueOrDot(cum, rpt), + "", template.HTMLEscapeString(lineContents)) return } + nestedInfo := false + cl := "deadsrc" + for _, an := range assembly { + if len(an.inlineCalls) > 0 || an.instruction != synthAsm { + nestedInfo = true + cl = "livesrc" + } + } + fmt.Fprintf(w, - " %6d %10s %10s %8s %s ", - fn.Info.Lineno, - valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt), - "", template.HTMLEscapeString(fn.Info.Name)) - srcIndent := indentation(fn.Info.Name) + " %6d %10s %10s %8s %s ", + lineNo, cl, + valueOrDot(flat, rpt), valueOrDot(cum, rpt), + "", template.HTMLEscapeString(lineContents)) + if nestedInfo { + srcIndent := indentation(lineContents) + printNested(w, srcIndent, assembly, reader, rpt) + } + fmt.Fprintln(w) +} + +func printNested(w io.Writer, srcIndent int, assembly []assemblyInstruction, reader *sourceReader, rpt *Report) { fmt.Fprint(w, "") var curCalls []callID for i, an := range assembly { @@ -374,15 +823,9 @@ func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyIns var fileline string if an.file != "" { - fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(an.file), an.line) + fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(filepath.Base(an.file)), an.line) } flat, cum := an.flat, an.cum - if an.flatDiv != 0 { - flat = flat / an.flatDiv - } - if an.cumDiv != 0 { - cum = cum / an.cumDiv - } // Print inlined call context. for j, c := range an.inlineCalls { @@ -398,17 +841,23 @@ func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyIns text := strings.Repeat(" ", srcIndent+4+4*j) + strings.TrimSpace(fline) fmt.Fprintf(w, " %8s %10s %10s %8s %s %s:%d\n", "", "", "", "", - template.HTMLEscapeString(fmt.Sprintf("%-80s", text)), + template.HTMLEscapeString(rightPad(text, 80)), template.HTMLEscapeString(filepath.Base(c.file)), c.line) } curCalls = an.inlineCalls + if an.instruction == synthAsm { + continue + } text := strings.Repeat(" ", srcIndent+4+4*len(curCalls)) + an.instruction fmt.Fprintf(w, " %8s %10s %10s %8x: %s %s\n", "", valueOrDot(flat, rpt), valueOrDot(cum, rpt), an.address, - template.HTMLEscapeString(fmt.Sprintf("%-80s", text)), - template.HTMLEscapeString(fileline)) + template.HTMLEscapeString(rightPad(text, 80)), + // fileline should not be escaped since it was formed by appending + // line number (just digits) to an escaped file name. Escaping here + // would cause double-escaping of file name. + fileline) } - fmt.Fprintln(w, "") + fmt.Fprint(w, "") } // printFunctionClosing prints the end of a function in a weblist report. @@ -482,36 +931,6 @@ func getSourceFromFile(file string, reader *sourceReader, fns graph.Nodes, start return src, file, nil } -// getMissingFunctionSource creates a dummy function body to point to -// the source file and annotates it with the samples in asm. 
-func getMissingFunctionSource(filename string, asm map[int][]assemblyInstruction, start, end int) (graph.Nodes, string) { - var fnodes graph.Nodes - for i := start; i <= end; i++ { - insts := asm[i] - if len(insts) == 0 { - continue - } - var group assemblyInstruction - for _, insn := range insts { - group.flat += insn.flat - group.cum += insn.cum - group.flatDiv += insn.flatDiv - group.cumDiv += insn.cumDiv - } - flat := group.flatValue() - cum := group.cumValue() - fnodes = append(fnodes, &graph.Node{ - Info: graph.NodeInfo{ - Name: "???", - Lineno: i, - }, - Flat: flat, - Cum: cum, - }) - } - return fnodes, filename -} - // sourceReader provides access to source code with caching of file contents. type sourceReader struct { // searchPath is a filepath.ListSeparator-separated list of directories where @@ -543,6 +962,7 @@ func (reader *sourceReader) fileError(path string) error { return reader.errors[path] } +// line returns the line numbered "lineno" in path, or _, false if lineno is out of range. func (reader *sourceReader) line(path string, lineno int) (string, bool) { lines, ok := reader.files[path] if !ok { @@ -651,3 +1071,37 @@ func indentation(line string) int { } return column } + +// rightPad pads the input with spaces on the right-hand side to make it have +// at least width n. Tabs are expanded to enough spaces to reach the next +// 8-aligned tab stop. +func rightPad(s string, n int) string { + var str strings.Builder + + // Convert tabs to spaces as we go so padding works regardless of what prefix + // is placed before the result. + column := 0 + for _, c := range s { + column++ + if c == '\t' { + str.WriteRune(' ') + for column%8 != 0 { + column++ + str.WriteRune(' ') + } + } else { + str.WriteRune(c) + } + } + for column < n { + column++ + str.WriteRune(' ') + } + return str.String() +} + +func canonicalizeFileName(fname string) string { + fname = strings.TrimPrefix(fname, "/proc/self/cwd/") + fname = strings.TrimPrefix(fname, "./") + return filepath.Clean(fname) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go index 02a6d772487..17c9f6eb947 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go @@ -25,12 +25,11 @@ func AddSourceTemplates(t *template.Template) { } const weblistPageCSS = `