$ go build wiki.go
$ ./wiki
-This is a sample page.
+This is a sample Page.
@@ -213,6 +213,12 @@ worry about its second parameter, nil, for now.)
This function will block until the program is terminated.
+
+ListenAndServe always returns an error, since it only returns when an
+unexpected error occurs.
+In order to log that error we wrap the function call with log.Fatal.
+
+
The function handler is of the type http.HandlerFunc.
It takes an http.ResponseWriter and an http.Request as
diff --git a/doc/articles/wiki/notemplate.go b/doc/articles/wiki/notemplate.go
index be214d1111d..0fda7a98ce5 100644
--- a/doc/articles/wiki/notemplate.go
+++ b/doc/articles/wiki/notemplate.go
@@ -7,6 +7,7 @@ package main
import (
"fmt"
"io/ioutil"
+ "log"
"net/http"
)
@@ -52,5 +53,5 @@ func editHandler(w http.ResponseWriter, r *http.Request) {
func main() {
http.HandleFunc("/view/", viewHandler)
http.HandleFunc("/edit/", editHandler)
- http.ListenAndServe(":8080", nil)
+ log.Fatal(http.ListenAndServe(":8080", nil))
}
diff --git a/doc/articles/wiki/part2.go b/doc/articles/wiki/part2.go
index c0231693efb..30f9dcf146d 100644
--- a/doc/articles/wiki/part2.go
+++ b/doc/articles/wiki/part2.go
@@ -7,6 +7,7 @@ package main
import (
"fmt"
"io/ioutil"
+ "log"
"net/http"
)
@@ -37,5 +38,5 @@ func viewHandler(w http.ResponseWriter, r *http.Request) {
func main() {
http.HandleFunc("/view/", viewHandler)
- http.ListenAndServe(":8080", nil)
+ log.Fatal(http.ListenAndServe(":8080", nil))
}
diff --git a/doc/articles/wiki/part3-errorhandling.go b/doc/articles/wiki/part3-errorhandling.go
index bb4ecda84bb..34b13a60864 100644
--- a/doc/articles/wiki/part3-errorhandling.go
+++ b/doc/articles/wiki/part3-errorhandling.go
@@ -7,6 +7,7 @@ package main
import (
"html/template"
"io/ioutil"
+ "log"
"net/http"
)
@@ -69,5 +70,5 @@ func main() {
http.HandleFunc("/view/", viewHandler)
http.HandleFunc("/edit/", editHandler)
http.HandleFunc("/save/", saveHandler)
- http.ListenAndServe(":8080", nil)
+ log.Fatal(http.ListenAndServe(":8080", nil))
}
diff --git a/doc/articles/wiki/part3.go b/doc/articles/wiki/part3.go
index 174f3abcd76..5e5d5056c49 100644
--- a/doc/articles/wiki/part3.go
+++ b/doc/articles/wiki/part3.go
@@ -7,6 +7,7 @@ package main
import (
"html/template"
"io/ioutil"
+ "log"
"net/http"
)
@@ -53,5 +54,5 @@ func main() {
http.HandleFunc("/view/", viewHandler)
http.HandleFunc("/edit/", editHandler)
//http.HandleFunc("/save/", saveHandler)
- http.ListenAndServe(":8080", nil)
+ log.Fatal(http.ListenAndServe(":8080", nil))
}
diff --git a/doc/asm.html b/doc/asm.html
index 79dc7df322f..e3e17f85f58 100644
--- a/doc/asm.html
+++ b/doc/asm.html
@@ -876,6 +876,12 @@ Addressing modes:
+
+The value of GOMIPS environment variable (hardfloat or
+softfloat) is made available to assembly code by predefining either
+GOMIPS_hardfloat or GOMIPS_softfloat.
+
+
Unsupported opcodes
diff --git a/doc/contrib.html b/doc/contrib.html
index 0290923bcd8..2dc1e7d0e47 100644
--- a/doc/contrib.html
+++ b/doc/contrib.html
@@ -34,6 +34,7 @@ We encourage all Go users to subscribe to
A summary of the changes between Go releases. Notes for the major releases:
diff --git a/doc/contribute.html b/doc/contribute.html
index f204b06e651..b35e9f4b180 100644
--- a/doc/contribute.html
+++ b/doc/contribute.html
@@ -88,7 +88,7 @@ script as *nix.)
-Your secret authentication token is now in a .gitcookie file
+Your secret authentication token is now in a .gitcookies file
and Git is configured to use this file.
@@ -158,7 +158,7 @@ completed and update the AUTHORS file.
Changes to Go must be reviewed before they are accepted, no matter who makes the change.
A custom git command called git-codereview, discussed below,
helps manage the code review process through a Google-hosted
-instance Gerrit.
+instance of Gerrit.
Install the git-codereview command
@@ -208,12 +208,6 @@ daily work, install the hooks in a new Git checkout by running
git-codereview hooks.
-
-The workflow described below assumes a single change per branch.
-It is also possible to prepare a sequence of (usually related) changes in a single branch.
-See the git-codereview documentation for details.
-
-
Set up git aliases
@@ -305,6 +299,15 @@ which only bug fixes and doc updates are accepted. New contributions can be
sent during a feature freeze but will not be accepted until the freeze thaws.
+
Not sure what change to make?
+
+
+If you want to become familiar with Gerrit and the contribution process,
+but aren't sure what you'd like to contribute just yet, you can use the scratch repository to practice
+making a change.
+
+
Making a change
Getting Go Source
@@ -420,6 +423,12 @@ In the Go contribution workflow this is done with a git
directly to that local branch.
+
+The workflow described here assumes a single change per branch.
+It is also possible to prepare a sequence of (usually related) changes in a single branch.
+See the git-codereview documentation for details.
+
+
$ git change <branch>
@@ -681,8 +690,8 @@ reviewers asking them to visit the issue's URL and make comments on the change.
When done, the reviewer adds comments through the Gerrit user interface
and clicks "Reply" to send comments back.
You will receive a mail notification when this happens.
-You must reply through the web interface.
-(Unlike with the old Rietveld review system, replying by mail has no effect.)
+You may reply through the web interface or
+via email.
Revise and resend
@@ -696,8 +705,8 @@ all the changes and comments made in the single URL.
-You must respond to review comments through the web interface.
-(Unlike with the old Rietveld review system, responding by mail has no effect.)
+You may respond to review comments through the web interface or
+via email.
diff --git a/doc/debugging_with_gdb.html b/doc/debugging_with_gdb.html
index f0e65ea2912..633d058c111 100644
--- a/doc/debugging_with_gdb.html
+++ b/doc/debugging_with_gdb.html
@@ -31,8 +31,8 @@ In time, a more Go-centric debugging architecture may be required.
When you compile and link your Go programs with the gc toolchain
-on Linux, Mac OS X, FreeBSD or NetBSD, the resulting binaries contain DWARFv3
-debugging information that recent versions (>7.1) of the GDB debugger can
+on Linux, Mac OS X, FreeBSD or NetBSD, the resulting binaries contain DWARFv4
+debugging information that recent versions (≥7.5) of the GDB debugger can
use to inspect a live process or a core dump.
diff --git a/doc/devel/release.html b/doc/devel/release.html
index eac2ddd3ef1..2e5b0d44ae0 100644
--- a/doc/devel/release.html
+++ b/doc/devel/release.html
@@ -89,6 +89,16 @@ See the Go
1.8.4 milestone on our issue tracker for details.
+
+go1.8.5 (released 2017/10/25) includes fixes to the compiler, linker, runtime,
+documentation, go command,
+and the crypto/x509 and net/smtp packages.
+It includes a fix to a bug introduced in Go 1.8.4 that broke go get
+of non-Git repositories under certain conditions.
+See the Go
+1.8.5 milestone on our issue tracker for details.
+
+The Go ecosystem provides a large suite of APIs and tools to
+diagnose logic and performance problems in Go programs. This page
+summarizes the available tools and helps Go users pick the right one
+for their specific problem.
+
+
+
+Diagnostics solutions can be categorized into the following groups:
+
+
+
+
Profiling: Profiling tools analyze the complexity and costs of a
+Go program such as its memory usage and frequently called
+functions to identify the expensive sections of a Go program.
+
Tracing: Tracing is a way to instrument code to analyze latency
+throughout the lifecycle of a call or user request. Traces provide an
+overview of how much latency each component contributes to the overall
+latency in a system. Traces can span multiple Go processes.
+
Debugging: Debugging allows us to pause a Go program and examine
+its execution. Program state and flow can be verified with debugging.
+
Runtime statistics and events: Collection and analysis of runtime stats and events
+provides a high-level overview of the health of Go programs. Spikes/dips of metrics
+help us to identify changes in throughput, utilization, and performance.
+
+
+
+Note: Some diagnostics tools may interfere with each other. For example, precise
+memory profiling skews CPU profiles and goroutine blocking profiling affects scheduler
+trace. Use tools in isolation to get more precise info.
+
+
+
Profiling
+
+
+Profiling is useful for identifying expensive or frequently called sections
+of code. The Go runtime provides
+profiling data in the format expected by the
+pprof visualization tool.
+The profiling data can be collected during testing
+via go test or endpoints made available from the
+net/http/pprof package. Users need to collect the profiling data and use pprof tools to filter
+and visualize the top code paths.
+
+
+
Predefined profiles provided by the runtime/pprof package:
+
+
+
+cpu: CPU profile determines where a program spends
+its time while actively consuming CPU cycles (as opposed to while sleeping or waiting for I/O).
+
+
+heap: Heap profile reports memory allocation samples;
+used to monitor current and historical memory usage, and to check for memory leaks.
+
+
+threadcreate: Thread creation profile reports the sections
+of the program that lead to the creation of new OS threads.
+
+
+goroutine: Goroutine profile reports the stack traces of all current goroutines.
+
+
+block: Block profile shows where goroutines block waiting on synchronization
+primitives (including timer channels). Block profile is not enabled by default;
+use runtime.SetBlockProfileRate to enable it.
+
+
+mutex: Mutex profile reports the lock contentions. When you think your
+CPU is not fully utilized due to a mutex contention, use this profile. Mutex profile
+is not enabled by default, see runtime.SetMutexProfileFraction to enable it.
+
+
+
+
+
What other profilers can I use to profile Go programs?
+
+
+On Linux, perf tools
+can be used for profiling Go programs. Perf can profile
+and unwind cgo/SWIG code and kernel, so it can be useful to get insights into
+native/kernel performance bottlenecks. On macOS,
+Instruments
+suite can be used to profile Go programs.
+
+
+
Can I profile my production services?
+
+
Yes. It is safe to profile programs in production, but enabling
+some profiles (e.g. the CPU profile) adds cost. You should expect to
+see performance downgrade. The performance penalty can be estimated
+by measuring the overhead of the profiler before turning it on in
+production.
+
+
+
+You may want to periodically profile your production services.
+Especially in a system with many replicas of a single process, selecting
+a random replica periodically is a safe option.
+Select a production process, profile it for
+X seconds for every Y seconds and save the results for visualization and
+analysis; then repeat periodically. Results may be manually and/or automatically
+reviewed to find problems.
+Collection of profiles can interfere with each other,
+so it is recommended to collect only a single profile at a time.
+
+
+
+What are the best ways to visualize the profiling data?
+
+
+
+Listing of the most expensive calls as text.
+
+
+
+
+
+Visualization of the most expensive calls as a graph.
+
+
+
Weblist view displays the expensive parts of the source line by line in
+an HTML page. In the following example, 530ms is spent in the
+runtime.concatstrings and cost of each line is presented
+in the listing.
+
+
+
+
+Visualization of the most expensive calls as weblist.
+
+
+
+Another way to visualize profile data is a flame graph.
+Flame graphs allow you to move in a specific ancestry path, so you can zoom
+in/out specific sections of code more easily.
+
+
+
+
+
+Flame graphs offer visualization to spot the most expensive code-paths.
+
+
+
Am I restricted to the built-in profiles?
+
+
+In addition to what is provided by the runtime, Go users can create
+their custom profiles via pprof.Profile
+and use the existing tools to examine them.
+
+
+
Can I serve the profiler handlers (/debug/pprof/...) on a different path and port?
+
+
+Yes. The net/http/pprof package registers its handlers to the default
+mux by default, but you can also register them yourself by using the handlers
+exported from the package.
+
+
+
+For example, the following example will serve the pprof.Profile
+handler on :7777 at /custom_debug_path/profile:
+
+Tracing is a way to instrument code to analyze latency throughout the
+lifecycle of a chain of calls. Go provides
+golang.org/x/net/trace
+package as a minimal tracing backend per Go node and provides a minimal
+instrumentation library with a simple dashboard. Go also provides
+an execution tracer to trace the runtime events within an interval.
+
+
+
Tracing enables us to:
+
+
+
Instrument and profile application latency in a Go process.
+
Measure the cost of specific calls in a long chain of calls.
+
Figure out the utilization and performance improvements.
+Bottlenecks are not always obvious without tracing data.
+
+
+
+In monolithic systems, it's relatively easy to collect diagnostic data
+from the building blocks of a program. All modules live within one
+process and share common resources to report logs, errors, and other
+diagnostic information. Once your system grows beyond a single process and
+starts to become distributed, it becomes harder to follow a call starting
+from the front-end web server to all of its back-ends until a response is
+returned back to the user. This is where distributed tracing plays a big
+role to instrument and analyze your production systems.
+
+
+
+Distributed tracing is a way to instrument code to analyze latency throughout
+the lifecycle of a user request. When a system is distributed and when
+conventional profiling and debugging tools don’t scale, you might want
+to use distributed tracing tools to analyze the performance of your user
+requests and RPCs.
+
+
+
Distributed tracing enables us to:
+
+
+
Instrument and profile application latency in a large system.
+
Track all RPCs within the lifecycle of a user request and see integration issues
+that are only visible in production.
+
Figure out performance improvements that can be applied to our systems.
+Many bottlenecks are not obvious before the collection of tracing data.
+
+
+
The Go ecosystem provides various distributed tracing libraries per tracing system
+and backend-agnostic ones.
+
+
+
Is there a way to automatically intercept each function call and create traces?
+
+
+Go doesn’t provide a way to automatically intercept every function call and create
+trace spans. You need to manually instrument your code to create, end, and annotate spans.
+
+
+
How should I propagate trace headers in Go libraries?
+
+
+You can propagate trace identifiers and tags in the context.Context.
+There is no canonical trace key or common representation of trace headers
+in the industry yet. Each tracing provider is responsible for providing propagation
+utilities in their Go libraries.
+
+
+
+What other low-level events from the standard library or
+runtime can be included in a trace?
+
+
+
+The standard library and runtime are trying to expose several additional APIs
+to notify on low level internal events. For example, httptrace.ClientTrace
+provides APIs to follow low-level events in the life cycle of an outgoing request.
+There is an ongoing effort to retrieve low-level runtime events from
+the runtime execution tracer and allow users to define and record their user events.
+
+
+
Debugging
+
+
+Debugging is the process of identifying why a program misbehaves.
+Debuggers allow us to understand a program’s execution flow and current state.
+There are several styles of debugging; this section will only focus on attaching
+a debugger to a program and core dump debugging.
+
+
+
Go users mostly use the following debuggers:
+
+
+
+Delve:
+Delve is a debugger for the Go programming language. It has
+support for Go’s runtime concepts and built-in types. Delve is
+trying to be a fully featured reliable debugger for Go programs.
+
+
+GDB:
+Go provides GDB support via the standard Go compiler and Gccgo.
+The stack management, threading, and runtime contain aspects that differ
+enough from the execution model GDB expects that they can confuse the
+debugger, even when the program is compiled with gccgo. Even though
+GDB can be used to debug Go programs, it is not ideal and may
+create confusion.
+
+
+
+
How well do debuggers work with Go programs?
+
+
+As of Go 1.9, the DWARF info generated by the gc compiler is not complete
+and sometimes makes debugging harder. There is an ongoing effort to improve the
+DWARF information to help the debuggers display more accurate information.
+Until those improvements are in you may prefer to disable compiler
+optimizations during development for more accuracy. To disable optimizations,
+use the "-N -l" compiler flags. For example, the following command builds
+a package with no compiler optimizations:
+
+
+
+$ go build -gcflags="-N -l"
+
+
+
+
+As of Go 1.10, the Go binaries will have the required DWARF information
+for accurate debugging. To enable the DWARF improvements, use the following
+compiler flags and use GDB until Delve supports location lists:
+
+
+
+
+$ go build -gcflags="-dwarflocationlists=true"
+
+
+
+
What’s the recommended debugger user interface?
+
+
+Even though both delve and gdb provide CLIs, most editor integrations
+and IDEs provide debugging-specific user interfaces. Please refer to
+the editors guide to see the options
+with debugger UI support.
+
+
+
Is it possible to do postmortem debugging with Go programs?
+
+
+A core dump file is a file that contains the memory dump of a running
+process and its process status. It is primarily used for post-mortem
+debugging of a program and to understand its state
+while it is still running. These two cases make debugging of core
+dumps a good diagnostic aid to postmortem and analyze production
+services. It is possible to obtain core files from Go programs and
+use delve or gdb to debug, see the
+core dump debugging
+page for a step-by-step guide.
+
+
+
Runtime statistics and events
+
+
+The runtime provides stats and reporting of internal events for
+users to diagnose performance and utilization problems at the
+runtime level.
+
+
+
+Users can monitor these stats to better understand the overall
+health and performance of Go programs.
+Some frequently monitored stats and states:
+
+
+
+
runtime.ReadMemStats
+reports the metrics related to heap
+allocation and garbage collection. Memory stats are useful for
+monitoring how much memory resources a process is consuming,
+whether the process can utilize memory well, and to catch
+memory leaks.
+
debug.ReadGCStats
+reads statistics about garbage collection.
+It is useful to see how much of the resources are spent on GC pauses.
+It also reports a timeline of garbage collector pauses and pause time percentiles.
+
debug.Stack
+returns the current stack trace. Stack trace
+is useful to see how many goroutines are currently running,
+what they are doing, and whether they are blocked or not.
+
debug.WriteHeapDump
+suspends the execution of all goroutines
+and allows you to dump the heap to a file. A heap dump is a
+snapshot of a Go process' memory at a given time. It contains all
+allocated objects as well as goroutines, finalizers, and more.
+
runtime.NumGoroutine
+returns the number of current goroutines.
+The value can be monitored to see whether enough goroutines are
+utilized, or to detect goroutine leaks.
+
+
+
Execution tracer
+
+
Go comes with a runtime execution tracer to capture a wide range
+of runtime events. Scheduling, syscall, garbage collections,
+heap size, and other events are collected by runtime and available
+for visualization by the go tool trace. Execution tracer is a tool
+to detect latency and utilization problems. You can examine how well
+the CPU is utilized, and when networking or syscalls are a cause of
+preemption for the goroutines.
+
+
Tracer is useful to:
+
+
Understand how your goroutines execute.
+
Understand some of the core runtime events such as GC runs.
+
Identify poorly parallelized execution.
+
+
+
However, it is not great for identifying hot spots such as
+analyzing the cause of excessive memory or CPU usage.
+Use profiling tools instead first to address them.
+
+
+
+
+
+
Above, the go tool trace visualization shows the execution started
+fine, and then it became serialized. It suggests that there might
+be lock contention for a shared resource that creates a bottleneck.
+
+
See go tool trace
+to collect and analyze runtime traces.
+
+
+
GODEBUG
+
+
Runtime also emits events and information if
+GODEBUG
+environmental variable is set accordingly.
+
+
+
GODEBUG=gctrace=1 prints garbage collector events at
+each collection, summarizing the amount of memory collected
+and the length of the pause.
+
GODEBUG=schedtrace=X prints scheduling events every X milliseconds.
+ {{if $.GoogleCN}}
+ A Tour of Go
+ {{else}}
+ A Tour of Go
+ {{end}}
+
An interactive introduction to Go in three sections.
The first section covers basic syntax and data structures; the second discusses
methods and interfaces; and the third introduces Go's concurrency primitives.
Each section concludes with a few exercises so you can practice what you've
-learned. You can take the tour online or
-install it locally with:
+learned. You can {{if not $.GoogleCN}}take the tour
+online or{{end}} install it locally with:
-
$ go get golang.org/x/tour/gotour
+
This will place the gotour binary in your workspace's bin directory.
-Also available as a
-screencast, this doc
-explains how to use the go command to fetch, build, and
-install packages, commands, and run tests.
+{{if not $.GoogleCN}}
+Also available as a screencast, this
+{{else}}
+This
+{{end}}
+doc explains how to use the go command
+to fetch, build, and install packages, commands, and run tests.
vim: vim-go plugin provides Go programming language support
Visual Studio Code:
Go extension provides support for the Go programming language
-
Gogland: Gogland is distributed either as a standalone IDE
-or as a plugin for the commercial IntelliJ Platform IDEs
+
GoLand: GoLand is distributed either as a standalone IDE
+or as a plugin for IntelliJ IDEA Ultimate
Atom: Go-Plus is an Atom package that provides enhanced Go support
@@ -44,7 +44,7 @@ The following feature matrix lists and compares the most significant features.
vim
Visual Studio Code
-
Gogland
+
GoLand
Atom
@@ -159,7 +159,7 @@ The following feature matrix lists and compares the most significant features.
Auto generate tests for packages, files and identifiers
No
Yes
-
No
+
Yes
No
diff --git a/doc/editors/gogland.png b/doc/editors/goland.png
similarity index 100%
rename from doc/editors/gogland.png
rename to doc/editors/goland.png
diff --git a/doc/effective_go.html b/doc/effective_go.html
index bc70b0c8e3f..61de824fcd2 100644
--- a/doc/effective_go.html
+++ b/doc/effective_go.html
@@ -1431,9 +1431,7 @@ func Append(slice, data []byte) []byte {
slice = newSlice
}
slice = slice[0:l+len(data)]
- for i, c := range data {
- slice[l+i] = c
- }
+ copy(slice[l:], data)
return slice
}
@@ -1521,7 +1519,7 @@ for i := range picture {
Maps are a convenient and powerful built-in data structure that associate
values of one type (the key) with values of another type
-(the element or value)
+(the element or value).
The key can be of any type for which the equality operator is defined,
such as integers,
floating point and complex numbers,
@@ -2792,7 +2790,7 @@ job := &Job{command, log.New(os.Stderr, "Job: ", log.Ldate)}
If we need to refer to an embedded field directly, the type name of the field,
ignoring the package qualifier, serves as a field name, as it did
-in the Read method of our ReaderWriter struct.
+in the Read method of our ReadWriter struct.
Here, if we needed to access the
*log.Logger of a Job variable job,
we would write job.Logger,
diff --git a/doc/gccgo_install.html b/doc/gccgo_install.html
index 4f6a911541f..d4eac12f11d 100644
--- a/doc/gccgo_install.html
+++ b/doc/gccgo_install.html
@@ -59,10 +59,17 @@ should not be visible to Go programs.
-The GCC 7 releases are expected to include a complete implementation
-of the Go 1.8 user libraries. As with earlier releases, the Go 1.8
-runtime is not fully merged, but that should not be visible to Go
-programs.
+The GCC 7 releases include a complete implementation of the Go 1.8.1
+user libraries. As with earlier releases, the Go 1.8 runtime is not
+fully merged, but that should not be visible to Go programs.
+
+
+
+The GCC 8 releases are expected to include a complete implementation
+of the Go 1.10 release, depending on release timing. The Go 1.10
+runtime has now been fully merged into the GCC development sources,
+and concurrent garbage collection is expected to be fully supported in
+GCC 8.
+ Go 1.10 is not yet released. These are work-in-progress
+ release notes. Go 1.10 is expected to be released in February 2018.
+
+
+
+The latest Go release, version 1.10, arrives six months after go1.9.
+Most of its changes are in the implementation of the toolchain, runtime, and libraries.
+As always, the release maintains the Go 1 promise of compatibility.
+We expect almost all Go programs to continue to compile and run as before.
+
+
+
+OVERVIEW HERE
+
+
+
Changes to the language
+
+
+There are no substantive changes to the language.
+
+
+
+A corner case involving shifts by untyped constants has been clarified,
+and as a result the compilers have been updated to allow the index expression
+x[1.0<<s] where s is an untyped constant;
+the go/types package already did.
+
+
+
+The grammar for method expressions has been updated to relax the
+syntax to allow any type expression as a receiver;
+this matches what the compilers were already implementing.
+For example, struct{io.Reader}.Read is a valid, if unusual,
+method expression that the compilers already accepted and is
+now permitted by the language grammar.
+
+
+
Ports
+
+
+There are no new supported operating systems or processor architectures in this release.
+Most of the work has focused on strengthening the support for existing ports,
+in particular new instructions in the assembler
+and improvements to the code generated by the compilers.
+
+By default, the go tool creates its temporary files and directories
+in the system temporary directory (for example, $TMPDIR on Unix).
+If the new environment variable $GOTMPDIR is set,
+the go tool will create its temporary files and directories in that directory instead.
+
+
+
Build & Install
+
+
+The gobuild command now detects out-of-date packages
+purely based on the content of source files, specified build flags, and metadata stored in the compiled packages.
+Modification times are no longer consulted or relevant.
+The old advice to add -a to force a rebuild in cases where
+the modification times were misleading for one reason or another
+(for example, changes in build flags) is no longer necessary:
+builds now always detect when packages must be rebuilt.
+(If you observe otherwise, please file a bug.)
+
+
+
+The gobuild-asmflags, -gcflags, -gccgoflags, and -ldflags options
+now apply by default only to the packages listed directly on the command line.
+For example, gobuild-gcflags=-mmypkg
+passes the compiler the -m flag when building mypkg
+but not its dependencies.
+The new, more general form -asmflags=pattern=flags (and similarly for the others)
+applies the flags only to the packages matching the pattern.
+For example: goinstall-ldflags=cmd/gofmt=-X=main.version=1.2.3cmd/...
+installs all the commands matching cmd/... but only applies the -X option
+to the linker flags for cmd/gofmt.
+For more details, see gohelpbuild.
+
+
+
+The gobuild command now maintains a cache of
+recently built packages, separate from the installed packages in $GOROOT/pkg or $GOPATH/pkg.
+The effect of the cache should be to speed builds that do not explicitly install packages
+or when switching between different copies of source code (for example, when changing
+back and forth between different branches in a version control system).
+The old advice to add the -i flag for speed, as in gobuild-i
+or gotest-i,
+is no longer necessary: builds run just as fast without -i.
+For more details, see gohelpcache.
+
+
+
+The go install command now installs only the
+packages and commands listed directly on the command line.
+For example, goinstallcmd/gofmt
+installs the gofmt program but not any of the packages on which it depends.
+The new build cache makes future commands still run as quickly as if the
+dependencies had been installed.
+To force the installation of dependencies, use the new
+goinstall-i flag.
+Installing dependencies should not be necessary in general,
+and the very concept of installed packages may disappear in a future release.
+
+
+
+Many details of the gobuild implementation have changed to support these improvements.
+One new requirement implied by these changes is that
+binary-only packages must now declare accurate import blocks in their
+stub source code, so that those imports can be made available when
+linking a program using the binary-only package.
+For more details, see gohelpfiletype.
+
+
+
Test
+
+
+The gotest command now caches test results:
+if the test executable and command line match a previous run
+and the files and environment variables consulted by that run
+have not changed either, gotest will print
+the previous test output, replacing the elapsed time with the string “(cached).”
+Test caching applies only to successful test results;
+only to gotest
+commands with an explicit list of packages; and
+only to command lines using a subset of the
+-cpu, -list, -parallel,
+-run, -short, and -v test flags.
+The idiomatic way to bypass test caching is to use -count=1.
+
+
+
+The gotest command now automatically runs
+govet on the package being tested,
+to identify significant problems before running the test.
+Any such problems are treated like build errors and prevent execution of the test.
+Only a high-confidence subset of the available govet
+checks are enabled for this automatic check.
+To disable the running of govet, use
+gotest-vet=off.
+
+
+
+The gotest-coverpkg flag now
+interprets its argument as a comma-separated list of patterns to match against
+the dependencies of each test, not as a list of packages to load anew.
+For example, gotest-coverpkg=all
+is now a meaningful way to run a test with coverage enabled for the test package
+and all its dependencies.
+Also, the gotest-coverprofile option is now
+supported when running multiple tests.
+
+
+
+In case of failure due to timeout, tests are now more likely to write their profiles before exiting.
+
+
+
+The gotest command now always
+merges the standard output and standard error from a given test binary execution
+and writes both to gotest's standard output.
+In past releases, gotest only applied this
+merging most of the time.
+
+
+
+The gotest-v output
+now includes PAUSE and CONT status update
+lines to make clearer when parallel tests pause and continue.
+
+
+
+Finally, the new gotest-json flag
+filters test output through the new command
+gotooltest2json
+to produce a machine-readable JSON-formatted description of test execution.
+This should allow the creation of rich presentations of test execution
+in IDEs and other tools.
+
+Cgo now implements a C typedef like “typedefXY;” using a Go type alias,
+so that Go code may use the types C.X and C.Y interchangeably.
+It also now supports the use of niladic function-like macros.
+Also, the documentation has been updated to clarify that
+Go structs and Go arrays are not supported in the type signatures of cgo-exported functions.
+
+
+
+During toolchain bootstrap, the environment variables CC and CC_FOR_TARGET specify
+the default C compiler that the resulting toolchain will use for host and target builds, respectively.
+However, if the toolchain will be used with multiple targets, it may be necessary to specify a different C compiler for each
+(for example, a different compiler for darwin/arm64 versus linux/ppc64le).
+The new set of environment variables CC_FOR_goos_goarch
+allows specifying a different default C compiler for each target.
+Note that these variables only apply during toolchain bootstrap,
+to set the defaults used by the resulting toolchain.
+Later gobuild commands refer to the CC environment
+variable or else the built-in default.
+For more details, see the cgo documentation.
+
+
+
Doc
+
+
+The godoc tool now adds functions returning slices of T or *T
+to the display of type T, similar to the existing behavior for functions returning single T or *T results.
+For example:
+
+
+
+$ go doc mail.Address
+package mail // import "net/mail"
+
+type Address struct {
+ Name string
+ Address string
+}
+ Address represents a single mail address.
+
+func ParseAddress(address string) (*Address, error)
+func ParseAddressList(list string) ([]*Address, error)
+func (a *Address) String() string
+$
+
+
+
+Previously, ParseAddressList was only shown in the package overview (godocmail).
+
+
+
Fix
+
+
+The gofix tool now replaces imports of "golang.org/x/net/context"
+with "context".
+(Forwarding aliases in the former make it completely equivalent to the latter when using Go 1.9 or later.)
+
+
+
Get
+
+
+The goget command now supports Fossil source code repositories.
+
+
+
Pprof
+
+
+The blocking and mutex profiles produced by the runtime/pprof package
+now include symbol information, so they can be viewed
+in gotoolpprof
+without the binary that produced the profile.
+(All other profile types were changed to include symbol information in Go 1.9.)
+
+The govet command now always has access to
+complete, up-to-date type information when checking packages, even for packages using cgo or vendored imports.
+The reports should be more accurate as a result.
+Note that only govet has access to this information;
+the more low-level gotoolvet does not
+and should be avoided except when working on vet itself.
+(As of Go 1.9, govet provides access to all the same flags as
+gotoolvet.)
+
+A few minor details of the default formatting of Go source code have changed.
+First, some complex three-index slice expressions previously formatted like
+x[i+1 : j:k] and now
+format with more consistent spacing: x[i+1 : j : k].
+Second, single-method interface literals written on a single line,
+which are sometimes used in type assertions,
+are no longer split onto multiple lines.
+Third, blank lines following an opening brace are now always elided.
+
+
+
+Note that these kinds of minor updates to gofmt are expected from time to time.
+In general, we recommend against building systems that check that source code
+matches the output of a specific version of gofmt.
+For example, a continuous integration test that fails if any code already checked into
+a repository is not “properly formatted” is inherently fragile and not recommended.
+
+
+
+If multiple programs must agree about which version of gofmt is used to format a source file,
+we recommend that they do this by arranging to invoke the same gofmt binary.
+For example, in the Go open source repository, we arrange for goimports and
+our Git pre-commit hook to agree about source code formatting by having both
+invoke the gofmt binary found in the current path.
+TODO: Make goimports actually do that. #22695.
+As another example, inside Google we arrange that source code presubmit
+checks run a gofmt binary maintained at a fixed path in a shared, distributed file system;
+that on engineering workstations /usr/bin/gofmt
+is a symbolic link to that same path;
+and that all editor integrations used for Google development
+explicitly invoke /usr/bin/gofmt.
+TODO: TMI?
+
+
+
Compiler Toolchain
+
+
+The compiler includes many improvements to the performance of generated code,
+spread fairly evenly across the supported architectures.
+
+
+
+TODO: What to say about DWARF work, if anything?
+Global constants (CL 61019), variable decomposition (CL 50878), variable liveness and location lists (CL 41770), more?
+What is enabled by default?
+
+
+
+TODO: What to say about FMA, if anything?
+The spec change was mentioned in Go 1.9 but I am not sure whether any new architectures turned it on in Go 1.10.
+
+
+
+The various build modes
+have been ported to more systems.
+Specifically, c-shared now works on linux/ppc64le, windows/386, and windows/amd64;
+pie now works on darwin/amd64 and also forces the use of external linking on all systems;
+and plugin now works on linux/ppc64le.
+
+
+
+The linux/ppc64le port now requires the use of external linking
+with any programs that use cgo, even uses by the standard library.
+
+
+
Assembler
+
+
+For the ARM 32-bit port, the assembler now supports the instructions
+BFC,
+BFI,
+BFX,
+BFXU,
+FMULAD,
+FMULAF,
+FMULSD,
+FMULSF,
+FNMULAD,
+FNMULAF,
+FNMULSD,
+FNMULSF,
+MULAD,
+MULAF,
+MULSD,
+MULSF,
+NMULAD,
+NMULAF,
+NMULD,
+NMULF,
+NMULSD,
+NMULSF,
+XTAB,
+XTABU,
+XTAH,
+and
+XTAHU.
+
+
+
+For the ARM 64-bit port, the assembler now supports the
+VADD,
+VADDP,
+VADDV,
+VAND,
+VCMEQ,
+VDUP,
+VEOR,
+VLD1,
+VMOV,
+VMOVI,
+VMOVS,
+VORR,
+VREV32,
+and
+VST1
+instructions.
+
+
+
+For the PowerPC 64-bit port, the assembler now supports the POWER9 instructions
+ADDEX,
+CMPEQB,
+COPY,
+DARN,
+LDMX,
+MADDHD,
+MADDHDU,
+MADDLD,
+MFVSRLD,
+MTVSRDD,
+MTVSRWS,
+PASTECC,
+VCMPNEZB,
+VCMPNEZBCC,
+and
+VMSUMUDM.
+
+
+
+For the S390X port, the assembler now supports the
+TMHH,
+TMHL,
+TMLH,
+and
+TMLL
+instructions.
+
+
+
+For the X86 64-bit port, the assembler now supports 359 new instructions
+and is believed to be complete up to and including the Intel AVX-256 extensions.
+The assembler also no longer implements MOVL$0,AX
+as an XORL instruction,
+to avoid clearing the condition flags unexpectedly.
+
+
+
Gccgo
+
+
+TODO: Words about GCC 8 and Go 1.10.
+
+
+
Runtime
+
+
+TODO: Don't start new threads from locked threads or threads that Go did not create. LockOSThread/UnlockOSThread now nest. LockOSThread + return kills the thread
+
+
+
+Stack traces no longer include implicit wrapper functions (previously marked <autogenerated>),
+unless a fault or panic happens in the wrapper itself.
+
+
+
+There is no longer a limit on the GOMAXPROCS setting.
+(In Go 1.9 the limit was 1024.)
+
+
+
Performance
+
+
+As always, the changes are so general and varied that precise
+statements about performance are difficult to make. Most programs
+should run a bit faster, due to speedups in the garbage collector,
+better generated code, and optimizations in the core library.
+
+
+
Garbage Collector
+
+
+TODO: Anything?
+
+
+
Core library
+
+
+All of the changes to the standard library are minor.
+The changes in bytes
+and net/url are the most likely to require updating of existing programs.
+
+
+
Minor changes to the library
+
+
+As always, there are various minor changes and updates to the library,
+made with the Go 1 promise of compatibility
+in mind.
+
+In general, the handling of special header formats is significantly improved and expanded.
+
+
+FileInfoHeader has always
+recorded the Unix UID and GID numbers from its os.FileInfo argument
+(specifically, from the system-dependent information returned by the FileInfo's Sys method)
+in the returned Header.
+Now it also records the user and group names corresponding to those IDs,
+as well as the major and minor device numbers for device files.
+
+
+Errors created by the package now begin with a consistent “tar:” prefix.
+(Previously they almost all began with a consistent “archive/tar:” prefix.)
+TODO: Why are we changing these? (#22740)
+
+
+The new Header.Format field
+of type Format
+controls which tar header format the Writer uses.
+The default, as before, is to select the most widely-supported header type
+that can encode the fields needed by the header (USTAR if possible, or else PAX if possible, or else GNU).
+The Reader sets Header.Format for each header it reads.
+
+
+Reader and the Writer now support PAX records,
+using the new Header.PAXRecords field.
+
+
+The Reader no longer insists that the file name or link name in GNU headers
+be valid UTF-8.
+
+
+When writing PAX- or GNU-format headers, the Writer now includes
+the Header.AccessTime and Header.ChangeTime fields (if set).
+When writing PAX-format headers, the times include sub-second precision.
+
+
+The Writer.Flush method,
+which has had no real effect since Go 1.1, is now marked deprecated.
+
+Go 1.10 adds more complete support for times and character set encodings in ZIP archives.
+
+
+The original ZIP format used the standard MS-DOS encoding of year, month, day, hour, minute, and second into fields in two 16-bit values.
+That encoding cannot represent time zones or odd seconds, so multiple extensions have been
+introduced to allow richer encodings.
+In Go 1.10, the Reader and Writer
+now support the widely-understood Info-Zip extension that encodes the time separately in the 32-bit Unix “seconds since epoch” form.
+The FileHeader's new Modified field of type time.Time
+obsoletes the ModifiedTime and ModifiedDate fields, which continue to hold the MS-DOS encoding.
+The ModTime and
+SetModTime methods
+now simply read and write the new Modified field.
+The Reader and Writer now adopt the common
+convention that ZIP archives storing the Unix time encoding store the local time
+in the MS-DOS field, so that the time zone offset can be inferred.
+TODO: These last bits are not true but probably should be (#22738)
+
+
+The header for each file in a ZIP archive has a flag bit indicating whether
+the name and comment fields are encoded as UTF-8, as opposed to a system-specific default encoding.
+In Go 1.8 and earlier, the Writer never set the UTF-8 bit.
+In Go 1.9, the Writer changed to set the UTF-8 bit almost always.
+This broke the creation of ZIP archives containing Shift-JIS file names.
+In Go 1.10, the Writer now sets the UTF-8 bit only when
+both the name and the comment field are valid UTF-8 and at least one is non-ASCII.
+Because non-ASCII encodings very rarely look like valid UTF-8, the new
+heuristic should be correct nearly all the time.
+Setting a FileHeader's new NonUTF8 field to true
+disables the heuristic entirely for that file.
+
+
+The Writer also now supports setting the end-of-central-directory record's comment field,
+by setting the Writer's new Comment field
+before calling the Close method.
+TODO: May change (#22737).
+
+The
+Fields,
+FieldsFunc,
+Split,
+and
+SplitAfter
+each already returned slices pointing into the same underlying array as its input.
+Go 1.10 changes each of the returned subslices to have capacity equal to its length,
+so that appending to a subslice will not overwrite adjacent data in the original input.
+
+NewOFB now panics if given
+an initialization vector of incorrect length, like the other constructors in the
+package always have.
+(Previously it returned a nil Stream implementation.)
+
+The TLS server now advertises support for SHA-512 signatures when using TLS 1.2.
+The server already supported the signatures, but some clients would not select
+them unless explicitly advertised.
+
+Leaf certificate validation now enforces the name constraints for all
+names contained in the certificate, not just the one name that a client has asked about.
+Extended key usage restrictions are similarly now checked all at once.
+As a result, after a certificate has been validated, it can now be trusted in its entirety.
+It is no longer necessary to revalidate the certificate for each additional name
+or key usage.
+TODO: Link to docs that may not exist yet.
+
+
+
+Parsed certificates also now report URI names and IP, email, and URI constraints, using the new
+Certificate fields
+URIs, PermittedIPRanges, ExcludedIPRanges,
+PermittedEmailAddresses, ExcludedEmailAddresses,
+PermittedURIDomains, and ExcludedURIDomains.
+
+
+
+The new MarshalPKCS8PrivateKey
+function converts a private key to PKCS#8 encoded form.
+
+Drivers that want to construct a sql.DB for
+their clients can now implement the Connector interface
+and call the new sql.OpenDB function,
+instead of needing to encode all configuration into a string
+passed to sql.Open.
+
+
+Drivers that implement ExecerContext
+no longer need to implement Execer;
+similarly, drivers that implement QueryerContext
+no longer need to implement Queryer.
+Previously, even if the context-based interfaces were implemented they were ignored
+unless the non-context-based interfaces were also implemented.
+
+
+To allow drivers to better isolate different clients using a cached driver connection in succession,
+if a Conn implements the new
+SessionResetter interface,
+database/sql will now call ResetSession before
+reusing the Conn for a new client.
+
+Go 1.10 also adds support for the LC_RPATH load command,
+represented by the types
+RpathCmd and
+Rpath,
+and new named constants
+for the various flag bits found in headers.
+
+Marshal now correctly encodes
+strings containing asterisks as type UTF8String instead of PrintableString,
+unless the string is in a struct field with a tag forcing the use of PrintableString.
+Marshal also now respects struct tags containing application directives.
+
+
+Unmarshal now respects
+struct field tags using the explicit and tag
+directives.
+
+Reader now disallows the use of
+nonsensical Comma and Comment settings,
+such as NUL, carriage return, newline, invalid runes, and the Unicode replacement character,
+or setting Comma and Comment equal to each other.
+
+
+In the case of a syntax error in a CSV record that spans multiple input lines, Reader
+now reports the line on which the record started in the ParseError's new StartLine field.
+
+
+Reader also no longer strips carriage return characters
+appearing before newline characters in multiline quoted strings.
+TODO: Maybe not (#22746).
+
+When the functions
+Decode
+and
+DecodeString
+encounter malformed input,
+they each now return the number of bytes already converted
+along with the error.
+Previously they always returned a count of 0 with any error.
+
+The Decoder
+adds a new method
+DisallowUnknownFields
+that causes it to report inputs with unknown JSON fields as a decoding error.
+(The default behavior has always been to discard unknown fields.)
+
+The new function
+NewTokenDecoder
+is like
+NewDecoder
+but creates a decoder reading from a TokenReader
+instead of an XML-formatted byte stream.
+This is meant to enable the construction of XML stream transformers in client libraries.
+
+The default
+Usage function now prints
+its first line of output to
+CommandLine.Output()
+instead of assuming os.Stderr,
+so that the usage message is properly redirected for
+clients using CommandLine.SetOutput.
+
+
+PrintDefaults now
+adds appropriate indentation after newlines in flag usage strings,
+so that multi-line usage strings display nicely.
+
+To support the doc change described above,
+functions returning slices of T, *T, **T, and so on
+are now reported in T's Type's Funcs list,
+instead of in the Package's Funcs list.
+
+The changes to the default formatting of Go source code
+discussed in the gofmt section above
+are implemented in the go/printer package
+and also affect the output of the higher-level go/format package.
+
+Int now supports conversions to and from bases 2 through 62
+in its SetString and Text methods.
+(Previously it only allowed bases 2 through 36.)
+The value of the constant MaxBase has been updated.
+
+
+Int adds a new
+CmpAbs method
+that is like Cmp but
+compares only the absolute values (not the signs) of its arguments.
+
+
+Float adds a new
+Sqrt method to
+compute square roots.
+
+The existing function and corresponding
+Rand.Perm method
+have been updated to use a more efficient algorithm, with the result
+that the specific permutations they return have changed.
+TODO: Remove? (#22744)
+
+The new functions
+Round
+and
+RoundToEven
+round their arguments to the nearest integer;
+Round rounds a half-integer to its larger integer neighbor (away from zero)
+while RoundToEven rounds a half-integer to its even integer neighbor.
+
+
+
+The new functions
+Erfinv
+and
+Erfcinv
+compute the inverse error function and the
+inverse complementary error function.
+
+The Conn and
+Listener implementations
+in this package now guarantee that when Close returns,
+the underlying file descriptor has been closed.
+(In earlier releases, if the Close stopped pending I/O
+in other goroutines, the closing of the file descriptor could happen in one of those
+goroutines shortly after Close returned.)
+
+On the client side, an HTTP proxy (most commonly configured by
+ProxyFromEnvironment)
+can now be specified as an https:// URL,
+meaning that the client connects to the proxy over HTTPS before issuing a standard, proxied HTTP request.
+(Previously, HTTP proxy URLs were required to begin with http:// or socks5://.)
+
+
+On the server side, FileServer and its single-file equivalent ServeFile
+now apply If-Range checks to HEAD requests.
+FileServer also now reports directory read failures to the Server's ErrorLog.
+
+
+Redirect now sets the Content-Type header before writing its HTTP response.
+
+The Client adds a new
+Noop method,
+to test whether the server is still responding.
+It also now defends against possible SMTP injection in the inputs
+to the Hello
+and Verify methods.
+
+ResolveReference
+now preserves multiple leading slashes in the target URL.
+Previously it rewrote multiple leading slashes to a single slash,
+which resulted in the http.Client
+following certain redirects incorrectly.
+
+Note the doubled slashes around path.
+In Go 1.9 and earlier, the resolved URL was http://host/path//to/page2:
+the doubled slash before path was incorrectly rewritten
+to a single slash, while the doubled slash after path was
+correctly preserved.
+Go 1.10 preserves both doubled slashes, resolving to http://host//path//to/page2
+as required by RFC 3986.
+
+
+
This change may break existing buggy programs that unintentionally
+construct a base URL with a leading doubled slash in the path and inadvertently
+depend on ResolveReference to correct that mistake.
+For example, this can happen if code adds a host prefix
+like http://host/ to a path like /my/api,
+resulting in a URL with a doubled slash: http://host//my/api.
+
+File adds new methods
+SetDeadline,
+SetReadDeadline,
+and
+SetWriteDeadline
+that allow setting I/O deadlines when the
+underlying file descriptor supports non-blocking I/O operations.
+The definition of these methods matches those in net.Conn.
+
+
+
+Also matching net.Conn,
+File's
+Close method
+now guarantees that when Close returns,
+the underlying file descriptor has been closed.
+(In earlier releases, like for net.Conn's,
+if the Close stopped pending I/O
+in other goroutines, the closing of the file descriptor could happen in one of those
+goroutines shortly after Close returned.)
+
+
+
+On BSD, macOS, and Solaris systems,
+Chtimes
+now supports setting file times with nanosecond precision
+(assuming the underlying file system can represent them).
+
+As noted above, the blocking and mutex profiles
+now include symbol information so that they can be viewed without needing
+the binary that generated them.
+
+ParseUint now returns
+the maximum magnitude integer of the appropriate size
+with any ErrRange error, as it was already documented to do.
+Previously it returned 0 with ErrRange errors.
+
+A new type
+Builder is a replacement for
+bytes.Buffer for the use case of
+accumulating text into a string result.
+The Builder's API is a restricted subset of bytes.Buffer's
+that allows it to safely avoid making a duplicate copy of the data
+during the String method.
+
+LoadLocation now uses the directory
+or uncompressed zip file named by the $ZONEINFO
+environment variable before looking in the default system-specific list of
+known installation locations or in $GOROOT/lib/time/zoneinfo.zip.
+
+The unicode package and associated
+support throughout the system has been upgraded from version 9.0 to
+Unicode 10.0,
+which adds 8,518 new characters, including four new scripts, one new property,
+a Bitcoin currency symbol, and 56 new emoji.
+
+
diff --git a/doc/go1.9.html b/doc/go1.9.html
index 222e0e9ba0f..fa50ae78059 100644
--- a/doc/go1.9.html
+++ b/doc/go1.9.html
@@ -719,6 +719,11 @@ version of gccgo.
header when matching handlers. The host is matched unmodified for CONNECT requests.
+
Server.WriteTimeout
now applies to HTTP/2 connections and is enforced per-stream.
diff --git a/doc/go_faq.html b/doc/go_faq.html
index f8322efcd32..62349fe5f7d 100644
--- a/doc/go_faq.html
+++ b/doc/go_faq.html
@@ -1476,6 +1476,53 @@ For more detail on this topic see the talk entitled,
Concurrency
is not Parallelism.
+
+Why is there no goroutine ID?
+
+
+Goroutines do not have names; they are just anonymous workers.
+They expose no unique identifier, name, or data structure to the programmer.
+Some people are surprised by this, expecting the go
+statement to return some item that can be used to access and control
+the goroutine later.
+
+
+
+The fundamental reason goroutines are anonymous is so that
+the full Go language is available when programming concurrent code.
+By contrast, the usage patterns that develop when threads and goroutines are
+named can restrict what a library using them can do.
+
+
+
+Here is an illustration of the difficulties.
+Once one names a goroutine and constructs a model around
+it, it becomes special, and one is tempted to associate all computation
+with that goroutine, ignoring the possibility
+of using multiple, possibly shared goroutines for the processing.
+If the net/http package associated per-request
+state with a goroutine,
+clients would be unable to use more goroutines
+when serving a request.
+
+
+
+Moreover, experience with libraries such as those for graphics systems
+that require all processing to occur on the "main thread"
+has shown how awkward and limiting the approach can be when
+deployed in a concurrent language.
+The very existence of a special thread or goroutine forces
+the programmer to distort the program to avoid crashes
+and other problems caused by inadvertently operating
+on the wrong thread.
+
+
+
+For those cases where a particular goroutine is truly special,
+the language provides features such as channels that can be
+used in flexible ways to interact with it.
+
+
Functions and Methods
diff --git a/doc/go_spec.html b/doc/go_spec.html
index 6642869d0c5..ebf1cefffea 100644
--- a/doc/go_spec.html
+++ b/doc/go_spec.html
@@ -1,6 +1,6 @@
@@ -16,8 +16,7 @@ Go is a general-purpose language designed with systems programming
in mind. It is strongly typed and garbage-collected and has explicit
support for concurrent programming. Programs are constructed from
packages, whose properties allow efficient management of
-dependencies. The existing implementations use a traditional
-compile/link model to generate executable binaries.
+dependencies.
@@ -577,11 +576,7 @@ or conversion, or implicitly when used in a
assignment or as an
operand in an expression.
It is an error if the constant value
-cannot be represented as a value of the respective type.
-For instance, 3.0 can be given any integer or any
-floating-point type, while 2147483648.0 (equal to 1<<31)
-can be given the types float32, float64, or uint32 but
-not int32 or string.
+cannot be represented as a value of the respective type.
@@ -765,7 +760,8 @@ using a receiver of that type.
A boolean type represents the set of Boolean truth values
denoted by the predeclared constants true
-and false. The predeclared boolean type is bool.
+and false. The predeclared boolean type is bool;
+it is a defined type.
Numeric types
@@ -812,8 +808,9 @@ uintptr an unsigned integer large enough to store the uninterpreted bits of a p
-To avoid portability issues all numeric types are distinct except
-byte, which is an alias for uint8, and
+To avoid portability issues all numeric types are defined
+types and thus distinct except
+byte, which is an alias for uint8, and
rune, which is an alias for int32.
Conversions
are required when different numeric types are mixed in an expression
@@ -829,7 +826,8 @@ A string type represents the set of string values.
A string value is a (possibly empty) sequence of bytes.
Strings are immutable: once created,
it is impossible to change the contents of a string.
-The predeclared string type is string.
+The predeclared string type is string;
+it is a defined type.
@@ -861,7 +859,8 @@ ElementType = Type .
The length is part of the array's type; it must evaluate to a
-non-negative constant representable by a value
+non-negative constant
+representable by a value
of type int.
The length of array a can be discovered
using the built-in function len.
@@ -1514,7 +1513,7 @@ are different because B0 is different from []string.
A value x is assignable to a variable of type T
-("x is assignable to T") in any of these cases:
+("x is assignable to T") if one of the following conditions applies:
@@ -1540,12 +1539,68 @@ and at least one of V or T is not a defined type.
is a pointer, function, slice, map, channel, or interface type.
+T is a floating-point type and x can be rounded to T's
+precision without overflow. Rounding uses IEEE 754 round-to-even rules but with an IEEE
+negative zero further simplified to an unsigned zero. Note that constant values never result
+in an IEEE negative zero, NaN, or infinity.
+
+
+
+T is a complex type, and x's
+componentsreal(x) and imag(x)
+are representable by values of T's component type (float32 or
+float64).
+
+
+
+
+x T x is representable by a value of T because
+
+'a' byte 97 is in the set of byte values
+97 rune rune is an alias for int32, and 97 is in the set of 32-bit integers
+"foo" string "foo" is in the set of string values
+1024 int16 1024 is in the set of 16-bit integers
+42.0 byte 42 is in the set of unsigned 8-bit integers
+1e10 uint64 10000000000 is in the set of unsigned 64-bit integers
+2.718281828459045 float32 2.718281828459045 rounds to 2.7182817 which is in the set of float32 values
+-1e-1000 float64 -1e-1000 rounds to IEEE -0.0 which is further simplified to 0.0
+0i int 0 is an integer value
+(42 + 0i) float32 42.0 (with zero imaginary part) is in the set of float32 values
+
+
+
+x T x is not representable by a value of T because
+
+0 bool 0 is not in the set of boolean values
+'a' string 'a' is a rune, it is not in the set of string values
+1024 byte 1024 is not in the set of unsigned 8-bit integers
+-1 uint16 -1 is not in the set of unsigned 16-bit integers
+1.1 int 1.1 is not an integer value
+42i float32 (0 + 42i) is not in the set of float32 values
+1e1000 float64 1e1000 overflows to IEEE +Inf after rounding
+
+
+
Blocks
@@ -1781,7 +1836,7 @@ const u, v float32 = 0, 3 // u = 0.0, v = 3.0
Within a parenthesized const declaration list the
-expression list may be omitted from any but the first declaration.
+expression list may be omitted from any but the first ConstSpec.
Such an empty list is equivalent to the textual substitution of the
first preceding non-empty expression list and its type if any.
Omitting the list of expressions is therefore equivalent to
@@ -1810,52 +1865,51 @@ const (
Within a constant declaration, the predeclared identifier
iota represents successive untyped integer
-constants. It is reset to 0 whenever the reserved word const
-appears in the source and increments after each ConstSpec.
+constants. Its value is the index of the respective ConstSpec
+in that constant declaration, starting at zero.
It can be used to construct a set of related constants:
-const ( // iota is reset to 0
+const (
c0 = iota // c0 == 0
c1 = iota // c1 == 1
c2 = iota // c2 == 2
)
-const ( // iota is reset to 0
- a = 1 << iota // a == 1
- b = 1 << iota // b == 2
- c = 3 // c == 3 (iota is not used but still incremented)
- d = 1 << iota // d == 8
+const (
+ a = 1 << iota // a == 1 (iota == 0)
+ b = 1 << iota // b == 2 (iota == 1)
+ c = 3 // c == 3 (iota == 2, unused)
+ d = 1 << iota // d == 8 (iota == 3)
)
-const ( // iota is reset to 0
+const (
u = iota * 42 // u == 0 (untyped integer constant)
v float64 = iota * 42 // v == 42.0 (float64 constant)
w = iota * 42 // w == 84 (untyped integer constant)
)
-const x = iota // x == 0 (iota has been reset)
-const y = iota // y == 0 (iota has been reset)
+const x = iota // x == 0
+const y = iota // y == 0
-Within an ExpressionList, the value of each iota is the same because
-it is only incremented after each ConstSpec:
+By definition, multiple uses of iota in the same ConstSpec all have the same value:
-This last example exploits the implicit repetition of the
-last non-empty expression list.
+This last example exploits the implicit repetition
+of the last non-empty expression list.
@@ -1946,7 +2000,7 @@ func (m *Mutex) Unlock() { /* Unlock implementation */ }
// NewMutex has the same composition as Mutex but its method set is empty.
type NewMutex Mutex
-// The method set of the base type of PtrMutex remains unchanged,
+// The method set of PtrMutex's underlying type *Mutex remains unchanged,
// but the method set of PtrMutex is empty.
type PtrMutex *Mutex
@@ -2224,7 +2278,6 @@ non-blank identifier denoting a
constant,
variable, or
function,
-a method expression yielding a function,
or a parenthesized expression.
@@ -2234,7 +2287,7 @@ operand only on the left-hand side of an assignment.
-Operand = Literal | OperandName | MethodExpr | "(" Expression ")" .
+Operand = Literal | OperandName | "(" Expression ")" .
Literal = BasicLit | CompositeLit | FunctionLit .
BasicLit = int_lit | float_lit | imaginary_lit | rune_lit | string_lit .
OperandName = identifier | QualifiedIdent.
@@ -2348,7 +2401,8 @@ For array and slice literals the following rules apply:
its position in the array.
An element with a key uses the key as its index. The
- key must be a non-negative constant representable by
+ key must be a non-negative constant
+ representable by
a value of type int; and if it is typed
it must be of integer type.
@@ -2499,6 +2553,7 @@ Primary expressions are the operands for unary and binary expressions.
PrimaryExpr =
Operand |
Conversion |
+ MethodExpr |
PrimaryExpr Selector |
PrimaryExpr Index |
PrimaryExpr Slice |
@@ -2685,7 +2740,7 @@ argument that is the receiver of the method.
@@ -2921,11 +2976,12 @@ The following rules apply:
If a is not a map:
-
the index x must be of integer type or untyped;
- it is in range if 0 <= x < len(a),
+
the index x must be of integer type or an untyped constant
+
a constant index must be non-negative and
+ representable by a value of type int
+
a constant index that is untyped is given type int
+
the index x is in range if 0 <= x < len(a),
otherwise it is out of range
-
a constant index must be non-negative
- and representable by a value of type int
@@ -3075,7 +3131,8 @@ For arrays or strings, the indices are in range if
0 <= low <= high <= len(a),
otherwise they are out of range.
For slices, the upper index bound is the slice capacity cap(a) rather than the length.
-A constant index must be non-negative and representable by a value of type
+A constant index must be non-negative and
+representable by a value of type
int; for arrays or constant strings, constant indices must also be in range.
If both indices are constant, they must satisfy low <= high.
If the indices are out of range at run time, a run-time panic occurs.
@@ -3091,8 +3148,8 @@ and the result of the slice operation is a slice with the same element type as t
If the sliced operand of a valid slice expression is a nil slice, the result
-is a nil slice. Otherwise, the result shares its underlying array with the
-operand.
+is a nil slice. Otherwise, if the result is a slice, it shares its underlying
+array with the operand.
Full slice expressions
@@ -3135,7 +3192,8 @@ If the sliced operand is an array, it must be addre
The right operand in a shift expression must have unsigned integer type
-or be an untyped constant representable by a value of type uint.
+or be an untyped constant representable by a
+value of type uint.
If the left operand of a non-constant shift expression is an untyped constant,
it is first converted to the type it would assume if the shift expression were
replaced by its left operand alone.
@@ -3392,18 +3451,20 @@ replaced by its left operand alone.
var s uint = 33
-var i = 1<<s // 1 has type int
-var j int32 = 1<<s // 1 has type int32; j == 0
-var k = uint64(1<<s) // 1 has type uint64; k == 1<<33
-var m int = 1.0<<s // 1.0 has type int; m == 0 if ints are 32bits in size
-var n = 1.0<<s == j // 1.0 has type int32; n == true
-var o = 1<<s == 2<<s // 1 and 2 have type int; o == true if ints are 32bits in size
-var p = 1<<s == 1<<33 // illegal if ints are 32bits in size: 1 has type int, but 1<<33 overflows int
-var u = 1.0<<s // illegal: 1.0 has type float64, cannot shift
-var u1 = 1.0<<s != 0 // illegal: 1.0 has type float64, cannot shift
-var u2 = 1<<s != 1.0 // illegal: 1 has type float64, cannot shift
-var v float32 = 1<<s // illegal: 1 has type float32, cannot shift
-var w int64 = 1.0<<33 // 1.0<<33 is a constant shift expression
+var i = 1<<s // 1 has type int
+var j int32 = 1<<s // 1 has type int32; j == 0
+var k = uint64(1<<s) // 1 has type uint64; k == 1<<33
+var m int = 1.0<<s // 1.0 has type int; m == 0 if ints are 32bits in size
+var n = 1.0<<s == j // 1.0 has type int32; n == true
+var o = 1<<s == 2<<s // 1 and 2 have type int; o == true if ints are 32bits in size
+var p = 1<<s == 1<<33 // illegal if ints are 32bits in size: 1 has type int, but 1<<33 overflows int
+var u = 1.0<<s // illegal: 1.0 has type float64, cannot shift
+var u1 = 1.0<<s != 0 // illegal: 1.0 has type float64, cannot shift
+var u2 = 1<<s != 1.0 // illegal: 1 has type float64, cannot shift
+var v float32 = 1<<s // illegal: 1 has type float32, cannot shift
+var w int64 = 1.0<<33 // 1.0<<33 is a constant shift expression
+var x = a[1.0<<s] // 1.0 has type int; x == a[0] if ints are 32bits in size
+var a = make([]byte, 1.0<<s) // 1.0 has type int; len(a) == 0 if ints are 32bits in size
@@ -3877,30 +3938,14 @@ func() int(x) // x is converted to func() int (unambiguous)
A constant value x can be converted to
-type T in any of these cases:
+type T if x is representable
+by a value of T.
+As a special case, an integer constant x can be converted to a
+string type using the
+same rule
+as for non-constant x.
-
-
- x is representable by a value of type T.
-
-
- x is a floating-point constant,
- T is a floating-point type,
- and x is representable by a value
- of type T after rounding using
- IEEE 754 round-to-even rules, but with an IEEE -0.0
- further rounded to an unsigned 0.0.
- The constant T(x) is the rounded value.
-
-
- x is an integer constant and T is a
- string type.
- The same rule
- as for non-constant x applies in this case.
-
-
-
Converting a constant yields a typed constant as result.
@@ -4187,7 +4232,8 @@ The divisor of a constant division or remainder operation must not be zero:
-The values of typed constants must always be accurately representable as values
+The values of typed constants must always be accurately
+representable by values
of the constant type. The following constant expressions are illegal:
@@ -4820,8 +4866,9 @@ in the TypeSwitchGuard.
-The type in a case may be nil;
-that case is used when the expression in the TypeSwitchGuard
+Instead of a type, a case may use the predeclared identifier
+nil;
+that case is selected when the expression in the TypeSwitchGuard
is a nil interface value.
There may be at most one nil case.
@@ -4979,12 +5026,10 @@ the range clause is equivalent to the same clause without that identifier.
-The range expression is evaluated once before beginning the loop,
-with one exception: if the range expression is an array or a pointer to an array
-and at most one iteration variable is present, only the range expression's
-length is evaluated; if that length is constant,
-by definition
-the range expression itself will not be evaluated.
+The range expression x is evaluated once before beginning the loop,
+with one exception: if at most one iteration variable is present and
+len(x) is constant,
+the range expression is not evaluated.
@@ -5680,9 +5725,10 @@ make(T, n) channel buffered channel of type T, buffer size n
-The size arguments n and m must be of integer type or untyped.
-A constant size argument must be non-negative and
-representable by a value of type int.
+Each of the size arguments n and m must be of integer type
+or an untyped constant.
+A constant size argument must be non-negative and representable
+by a value of type int; if it is an untyped constant it is given type int.
If both n and m are provided and are constant, then
n must be no larger than m.
If n is negative or larger than m at run time,
@@ -6157,7 +6203,7 @@ of make,
and no explicit initialization is provided, the variable or value is
given a default value. Each element of such a variable or value is
set to the zero value for its type: false for booleans,
-0 for integers, 0.0 for floats, ""
+0 for numeric types, ""
for strings, and nil for pointers, functions, interfaces, slices, channels, and maps.
This initialization is done recursively, so for instance each element of an
array of structs will have its fields zeroed if no value is specified.
@@ -6409,7 +6455,8 @@ type Error interface {
Package unsafe
-The built-in package unsafe, known to the compiler,
+The built-in package unsafe, known to the compiler
+and accessible through the import path"unsafe",
provides facilities for low-level programming including operations
that violate the type system. A package using unsafe
must be vetted manually for type safety and may not be portable.
diff --git a/doc/help.html b/doc/help.html
index 057d75290ee..f668196871d 100644
--- a/doc/help.html
+++ b/doc/help.html
@@ -1,6 +1,7 @@
@@ -64,6 +68,7 @@ for Go news and discussion.
The Go Time podcast is a panel of Go experts and special guests
discussing the Go programming language, the community, and everything in between.
+{{end}}
Community resources
@@ -73,11 +78,13 @@ Each month in places around the world, groups of Go programmers ("gophers")
meet to talk about Go. Find a chapter near you.
+{{if not $.GoogleCN}}
diff --git a/doc/install-source.html b/doc/install-source.html
index d120f7d6f2b..17b6ed3ea12 100644
--- a/doc/install-source.html
+++ b/doc/install-source.html
@@ -143,12 +143,13 @@ packaged Go distribution.
To build a bootstrap tool chain from source, use
either the git branch release-branch.go1.4 or
-go1.4-bootstrap-20170531.tar.gz,
+go1.4-bootstrap-20171003.tar.gz,
which contains the Go 1.4 source code plus accumulated fixes
to keep the tools running on newer operating systems.
(Go 1.4 was the last distribution in which the tool chain was written in C.)
After unpacking the Go 1.4 source, cd to
-the src subdirectory and run make.bash (or,
+the src subdirectory, set CGO_ENABLED=0 in
+the environment, and run make.bash (or,
on Windows, make.bat).
@@ -471,8 +472,9 @@ Choices for $GOARCH are
amd64 (64-bit x86, the most mature port),
386 (32-bit x86), arm (32-bit ARM), arm64 (64-bit ARM),
ppc64le (PowerPC 64-bit, little-endian), ppc64 (PowerPC 64-bit, big-endian),
-mips64le (MIPS 64-bit, little-endian), and mips64 (MIPS 64-bit, big-endian).
-mipsle (MIPS 32-bit, little-endian), and mips (MIPS 32-bit, big-endian).
+mips64le (MIPS 64-bit, little-endian), mips64 (MIPS 64-bit, big-endian),
+mipsle (MIPS 32-bit, little-endian), mips (MIPS 32-bit, big-endian), and
+s390x (IBM System z 64-bit, big-endian).
The valid combinations of $GOOS and $GOARCH are:
@@ -536,6 +538,9 @@ The valid combinations of $GOOS and $GOARCH are:
Official binary
-distributions are available for the FreeBSD (release 8-STABLE and above),
+distributions are available for the FreeBSD (release 10-STABLE and above),
Linux, Mac OS X (10.8 and above), and Windows operating systems and
the 32-bit (386) and 64-bit (amd64) x86 processor
architectures.
@@ -47,7 +47,7 @@ If your OS or architecture is not on the list, you may be able to
Notes
-
FreeBSD 9.3 or later
amd64, 386
Debian GNU/kFreeBSD not supported
+
FreeBSD 10.3 or later
amd64, 386
Debian GNU/kFreeBSD not supported
Linux 2.6.23 or later with glibc
amd64, 386, arm, arm64, s390x, ppc64le
CentOS/RHEL 5.x not supported. Install from source for other libc.
macOS 10.8 or later
amd64
use the clang or gcc† that comes with Xcode‡ for cgo support
Windows XP SP2 or later
amd64, 386
use MinGW gcc†. No need for cygwin or msys.
diff --git a/doc/progs/run.go b/doc/progs/run.go
index 8479a66b675..06ea130d999 100644
--- a/doc/progs/run.go
+++ b/doc/progs/run.go
@@ -219,12 +219,5 @@ func fixcgo() {
// cgo1 and cgo2 don't run on netbsd, srandom has a different signature
skipTest("cgo1")
skipTest("cgo2")
- // cgo3 and cgo4 don't run on netbsd, since cgo cannot handle stdout correctly, see issue #10715.
- skipTest("cgo3")
- skipTest("cgo4")
- case "openbsd", "solaris":
- // cgo3 and cgo4 don't run on openbsd and solaris, since cgo cannot handle stdout correctly, see issue #10715.
- skipTest("cgo3")
- skipTest("cgo4")
}
}
diff --git a/misc/cgo/errors/errors_test.go b/misc/cgo/errors/errors_test.go
new file mode 100644
index 00000000000..118187f23b8
--- /dev/null
+++ b/misc/cgo/errors/errors_test.go
@@ -0,0 +1,161 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package errorstest
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func path(file string) string {
+ return filepath.Join("src", file)
+}
+
+func check(t *testing.T, file string) {
+ t.Run(file, func(t *testing.T) {
+ t.Parallel()
+
+ contents, err := ioutil.ReadFile(path(file))
+ if err != nil {
+ t.Fatal(err)
+ }
+ var errors []*regexp.Regexp
+ for i, line := range bytes.Split(contents, []byte("\n")) {
+ if bytes.HasSuffix(line, []byte("ERROR HERE")) {
+ re := regexp.MustCompile(regexp.QuoteMeta(fmt.Sprintf("%s:%d:", file, i+1)))
+ errors = append(errors, re)
+ continue
+ }
+
+ frags := bytes.SplitAfterN(line, []byte("ERROR HERE: "), 2)
+ if len(frags) == 1 {
+ continue
+ }
+ re, err := regexp.Compile(string(frags[1]))
+ if err != nil {
+ t.Errorf("Invalid regexp after `ERROR HERE: `: %#q", frags[1])
+ continue
+ }
+ errors = append(errors, re)
+ }
+ if len(errors) == 0 {
+ t.Fatalf("cannot find ERROR HERE")
+ }
+ expect(t, file, errors)
+ })
+}
+
+func expect(t *testing.T, file string, errors []*regexp.Regexp) {
+ dir, err := ioutil.TempDir("", filepath.Base(t.Name()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ dst := filepath.Join(dir, strings.TrimSuffix(file, ".go"))
+ cmd := exec.Command("go", "build", "-gcflags=-L", "-o="+dst, path(file)) // TODO(gri) no need for -gcflags=-L if go tool is adjusted
+ out, err := cmd.CombinedOutput()
+ if err == nil {
+ t.Errorf("expected cgo to fail but it succeeded")
+ }
+
+ lines := bytes.Split(out, []byte("\n"))
+ for _, re := range errors {
+ found := false
+ for _, line := range lines {
+ if re.Match(line) {
+ t.Logf("found match for %#q: %q", re, line)
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("expected error output to contain %#q", re)
+ }
+ }
+
+ if t.Failed() {
+ t.Logf("actual output:\n%s", out)
+ }
+}
+
+func sizeofLongDouble(t *testing.T) int {
+ cmd := exec.Command("go", "run", path("long_double_size.go"))
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("%#q: %v:\n%s", strings.Join(cmd.Args, " "), err, out)
+ }
+
+ i, err := strconv.Atoi(strings.TrimSpace(string(out)))
+ if err != nil {
+ t.Fatalf("long_double_size.go printed invalid size: %s", out)
+ }
+ return i
+}
+
+func TestReportsTypeErrors(t *testing.T) {
+ for _, file := range []string{
+ "err1.go",
+ "err2.go",
+ "err3.go",
+ "issue7757.go",
+ "issue8442.go",
+ "issue11097a.go",
+ "issue11097b.go",
+ "issue13129.go",
+ "issue13423.go",
+ "issue13467.go",
+ "issue13635.go",
+ "issue13830.go",
+ "issue16116.go",
+ "issue16591.go",
+ "issue18452.go",
+ "issue18889.go",
+ } {
+ check(t, file)
+ }
+
+ if sizeofLongDouble(t) > 8 {
+ check(t, "err4.go")
+ }
+}
+
+func TestToleratesOptimizationFlag(t *testing.T) {
+ for _, cflags := range []string{
+ "",
+ "-O",
+ } {
+ cflags := cflags
+ t.Run(cflags, func(t *testing.T) {
+ t.Parallel()
+
+ cmd := exec.Command("go", "build", path("issue14669.go"))
+ cmd.Env = append(os.Environ(), "CGO_CFLAGS="+cflags)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("%#q: %v:\n%s", strings.Join(cmd.Args, " "), err, out)
+ }
+ })
+ }
+}
+
+func TestMallocCrashesOnNil(t *testing.T) {
+ t.Parallel()
+
+ cmd := exec.Command("go", "run", path("malloc.go"))
+ out, err := cmd.CombinedOutput()
+ if err == nil {
+ t.Logf("%#q:\n%s", strings.Join(cmd.Args, " "), out)
+ t.Fatalf("succeeded unexpectedly")
+ }
+}
diff --git a/misc/cgo/errors/issue13635.go b/misc/cgo/errors/issue13635.go
deleted file mode 100644
index 0ce2b1e83a1..00000000000
--- a/misc/cgo/errors/issue13635.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// issue 13635: used to output error about C.unsignedchar.
-// This test tests all such types.
-
-package pkg
-
-import "C"
-
-func main() {
- var (
- _ C.uchar = "uc" // ERROR HERE
- _ C.schar = "sc" // ERROR HERE
- _ C.ushort = "us" // ERROR HERE
- _ C.uint = "ui" // ERROR HERE
- _ C.ulong = "ul" // ERROR HERE
- _ C.longlong = "ll" // ERROR HERE
- _ C.ulonglong = "ull" // ERROR HERE
- _ C.complexfloat = "cf" // ERROR HERE
- _ C.complexdouble = "cd" // ERROR HERE
- )
-}
diff --git a/misc/cgo/errors/ptr.go b/misc/cgo/errors/ptr_test.go
similarity index 79%
rename from misc/cgo/errors/ptr.go
rename to misc/cgo/errors/ptr_test.go
index 3e117666bff..d295a5849db 100644
--- a/misc/cgo/errors/ptr.go
+++ b/misc/cgo/errors/ptr_test.go
@@ -4,20 +4,18 @@
// Tests that cgo detects invalid pointer passing at runtime.
-package main
+package errorstest
import (
"bufio"
"bytes"
"fmt"
- "io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
- "runtime"
"strings"
- "sync"
+ "testing"
)
// ptrTest is the tests without the boilerplate.
@@ -344,7 +342,7 @@ var ptrTests = []ptrTest{
fail: false,
},
{
- // Issue #21306.
+ // Test preemption while entering a cgo call. Issue #21306.
name: "preempt-during-call",
c: `void f() {}`,
imports: []string{"runtime", "sync"},
@@ -353,219 +351,145 @@ var ptrTests = []ptrTest{
},
}
-func main() {
- os.Exit(doTests())
+func TestPointerChecks(t *testing.T) {
+ for _, pt := range ptrTests {
+ pt := pt
+ t.Run(pt.name, func(t *testing.T) {
+ testOne(t, pt)
+ })
+ }
}
-func doTests() int {
- gopath, err := ioutil.TempDir("", "cgoerrors")
+func testOne(t *testing.T, pt ptrTest) {
+ t.Parallel()
+
+ gopath, err := ioutil.TempDir("", filepath.Base(t.Name()))
if err != nil {
- fmt.Fprintln(os.Stderr, err)
- return 2
+ t.Fatal(err)
}
defer os.RemoveAll(gopath)
- if err := os.MkdirAll(filepath.Join(gopath, "src"), 0777); err != nil {
- fmt.Fprintln(os.Stderr, err)
- return 2
+ src := filepath.Join(gopath, "src")
+ if err := os.Mkdir(src, 0777); err != nil {
+ t.Fatal(err)
}
- workers := runtime.NumCPU() + 1
-
- var wg sync.WaitGroup
- c := make(chan int)
- errs := make(chan int)
- for i := 0; i < workers; i++ {
- wg.Add(1)
- go func() {
- worker(gopath, c, errs)
- wg.Done()
- }()
- }
-
- for i := range ptrTests {
- c <- i
- }
- close(c)
-
- go func() {
- wg.Wait()
- close(errs)
- }()
-
- tot := 0
- for e := range errs {
- tot += e
- }
- return tot
-}
-
-func worker(gopath string, c, errs chan int) {
- e := 0
- for i := range c {
- if !doOne(gopath, i) {
- e++
- }
- }
- if e > 0 {
- errs <- e
- }
-}
-
-func doOne(gopath string, i int) bool {
- t := &ptrTests[i]
-
- dir := filepath.Join(gopath, "src", fmt.Sprintf("dir%d", i))
- if err := os.Mkdir(dir, 0777); err != nil {
- fmt.Fprintln(os.Stderr, err)
- return false
- }
-
- name := filepath.Join(dir, fmt.Sprintf("t%d.go", i))
+ name := filepath.Join(src, fmt.Sprintf("%s.go", filepath.Base(t.Name())))
f, err := os.Create(name)
if err != nil {
- fmt.Fprintln(os.Stderr, err)
- return false
+ t.Fatal(err)
}
b := bufio.NewWriter(f)
fmt.Fprintln(b, `package main`)
fmt.Fprintln(b)
fmt.Fprintln(b, `/*`)
- fmt.Fprintln(b, t.c)
+ fmt.Fprintln(b, pt.c)
fmt.Fprintln(b, `*/`)
fmt.Fprintln(b, `import "C"`)
fmt.Fprintln(b)
- for _, imp := range t.imports {
+ for _, imp := range pt.imports {
fmt.Fprintln(b, `import "`+imp+`"`)
}
- if len(t.imports) > 0 {
+ if len(pt.imports) > 0 {
fmt.Fprintln(b)
}
- if len(t.support) > 0 {
- fmt.Fprintln(b, t.support)
+ if len(pt.support) > 0 {
+ fmt.Fprintln(b, pt.support)
fmt.Fprintln(b)
}
fmt.Fprintln(b, `func main() {`)
- fmt.Fprintln(b, t.body)
+ fmt.Fprintln(b, pt.body)
fmt.Fprintln(b, `}`)
if err := b.Flush(); err != nil {
- fmt.Fprintf(os.Stderr, "flushing %s: %v\n", name, err)
- return false
+ t.Fatalf("flushing %s: %v", name, err)
}
if err := f.Close(); err != nil {
- fmt.Fprintf(os.Stderr, "closing %s: %v\n", name, err)
- return false
+ t.Fatalf("closing %s: %v", name, err)
}
- for _, e := range t.extra {
- if err := ioutil.WriteFile(filepath.Join(dir, e.name), []byte(e.contents), 0644); err != nil {
- fmt.Fprintf(os.Stderr, "writing %s: %v\n", e.name, err)
- return false
+ for _, e := range pt.extra {
+ if err := ioutil.WriteFile(filepath.Join(src, e.name), []byte(e.contents), 0644); err != nil {
+ t.Fatalf("writing %s: %v", e.name, err)
}
}
- ok := true
+ args := func(cmd *exec.Cmd) string {
+ return strings.Join(cmd.Args, " ")
+ }
cmd := exec.Command("go", "build")
- cmd.Dir = dir
+ cmd.Dir = src
cmd.Env = addEnv("GOPATH", gopath)
buf, err := cmd.CombinedOutput()
if err != nil {
- fmt.Fprintf(os.Stderr, "test %s failed to build: %v\n%s", t.name, err, buf)
- return false
+ t.Logf("%#q:\n%s", args(cmd), buf)
+ t.Fatalf("failed to build: %v", err)
}
- exe := filepath.Join(dir, filepath.Base(dir))
+ exe := filepath.Join(src, filepath.Base(src))
cmd = exec.Command(exe)
- cmd.Dir = dir
+ cmd.Dir = src
- if t.expensive {
+ if pt.expensive {
cmd.Env = cgocheckEnv("1")
buf, err := cmd.CombinedOutput()
if err != nil {
- var errbuf bytes.Buffer
- if t.fail {
- fmt.Fprintf(&errbuf, "test %s marked expensive but failed when not expensive: %v\n", t.name, err)
+ t.Logf("%#q:\n%s", args(cmd), buf)
+ if pt.fail {
+ t.Fatalf("test marked expensive, but failed when not expensive: %v", err)
} else {
- fmt.Fprintf(&errbuf, "test %s failed unexpectedly with GODEBUG=cgocheck=1: %v\n", t.name, err)
+ t.Errorf("failed unexpectedly with GODEBUG=cgocheck=1: %v", err)
}
- reportTestOutput(&errbuf, t.name, buf)
- os.Stderr.Write(errbuf.Bytes())
- ok = false
}
cmd = exec.Command(exe)
- cmd.Dir = dir
+ cmd.Dir = src
}
- if t.expensive {
+ if pt.expensive {
cmd.Env = cgocheckEnv("2")
}
buf, err = cmd.CombinedOutput()
-
- if t.fail {
+ if pt.fail {
if err == nil {
- var errbuf bytes.Buffer
- fmt.Fprintf(&errbuf, "test %s did not fail as expected\n", t.name)
- reportTestOutput(&errbuf, t.name, buf)
- os.Stderr.Write(errbuf.Bytes())
- ok = false
+ t.Logf("%#q:\n%s", args(cmd), buf)
+ t.Fatalf("did not fail as expected")
} else if !bytes.Contains(buf, []byte("Go pointer")) {
- var errbuf bytes.Buffer
- fmt.Fprintf(&errbuf, "test %s output does not contain expected error (failed with %v)\n", t.name, err)
- reportTestOutput(&errbuf, t.name, buf)
- os.Stderr.Write(errbuf.Bytes())
- ok = false
+ t.Logf("%#q:\n%s", args(cmd), buf)
+ t.Fatalf("did not print expected error (failed with %v)", err)
}
} else {
if err != nil {
- var errbuf bytes.Buffer
- fmt.Fprintf(&errbuf, "test %s failed unexpectedly: %v\n", t.name, err)
- reportTestOutput(&errbuf, t.name, buf)
- os.Stderr.Write(errbuf.Bytes())
- ok = false
+ t.Logf("%#q:\n%s", args(cmd), buf)
+ t.Fatalf("failed unexpectedly: %v", err)
}
- if !t.expensive && ok {
+ if !pt.expensive {
// Make sure it passes with the expensive checks.
cmd := exec.Command(exe)
- cmd.Dir = dir
+ cmd.Dir = src
cmd.Env = cgocheckEnv("2")
buf, err := cmd.CombinedOutput()
if err != nil {
- var errbuf bytes.Buffer
- fmt.Fprintf(&errbuf, "test %s failed unexpectedly with expensive checks: %v\n", t.name, err)
- reportTestOutput(&errbuf, t.name, buf)
- os.Stderr.Write(errbuf.Bytes())
- ok = false
+ t.Logf("%#q:\n%s", args(cmd), buf)
+ t.Fatalf("failed unexpectedly with expensive checks: %v", err)
}
}
}
- if t.fail && ok {
+ if pt.fail {
cmd = exec.Command(exe)
- cmd.Dir = dir
+ cmd.Dir = src
cmd.Env = cgocheckEnv("0")
buf, err := cmd.CombinedOutput()
if err != nil {
- var errbuf bytes.Buffer
- fmt.Fprintf(&errbuf, "test %s failed unexpectedly with GODEBUG=cgocheck=0: %v\n", t.name, err)
- reportTestOutput(&errbuf, t.name, buf)
- os.Stderr.Write(errbuf.Bytes())
- ok = false
+ t.Logf("%#q:\n%s", args(cmd), buf)
+ t.Fatalf("failed unexpectedly with GODEBUG=cgocheck=0: %v", err)
}
}
-
- return ok
-}
-
-func reportTestOutput(w io.Writer, name string, buf []byte) {
- fmt.Fprintf(w, "=== test %s output ===\n", name)
- fmt.Fprintf(w, "%s", buf)
- fmt.Fprintf(w, "=== end of test %s output ===\n", name)
}
func cgocheckEnv(val string) []string {
diff --git a/misc/cgo/errors/err1.go b/misc/cgo/errors/src/err1.go
similarity index 100%
rename from misc/cgo/errors/err1.go
rename to misc/cgo/errors/src/err1.go
diff --git a/misc/cgo/errors/err2.go b/misc/cgo/errors/src/err2.go
similarity index 100%
rename from misc/cgo/errors/err2.go
rename to misc/cgo/errors/src/err2.go
diff --git a/misc/cgo/errors/err3.go b/misc/cgo/errors/src/err3.go
similarity index 100%
rename from misc/cgo/errors/err3.go
rename to misc/cgo/errors/src/err3.go
diff --git a/misc/cgo/errors/src/err4.go b/misc/cgo/errors/src/err4.go
new file mode 100644
index 00000000000..8e5f78e987b
--- /dev/null
+++ b/misc/cgo/errors/src/err4.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+long double x = 0;
+*/
+import "C"
+
+func main() {
+ _ = C.x // ERROR HERE
+ _ = C.x
+}
diff --git a/misc/cgo/errors/issue11097a.go b/misc/cgo/errors/src/issue11097a.go
similarity index 100%
rename from misc/cgo/errors/issue11097a.go
rename to misc/cgo/errors/src/issue11097a.go
diff --git a/misc/cgo/errors/issue11097b.go b/misc/cgo/errors/src/issue11097b.go
similarity index 100%
rename from misc/cgo/errors/issue11097b.go
rename to misc/cgo/errors/src/issue11097b.go
diff --git a/misc/cgo/errors/issue13129.go b/misc/cgo/errors/src/issue13129.go
similarity index 88%
rename from misc/cgo/errors/issue13129.go
rename to misc/cgo/errors/src/issue13129.go
index f7ad7a7e149..057bce4b829 100644
--- a/misc/cgo/errors/issue13129.go
+++ b/misc/cgo/errors/src/issue13129.go
@@ -10,5 +10,5 @@ import "C"
func main() {
var x C.ushort
- x = int(0) // ERROR HERE
+ x = int(0) // ERROR HERE: C\.ushort
}
diff --git a/misc/cgo/errors/issue13423.go b/misc/cgo/errors/src/issue13423.go
similarity index 100%
rename from misc/cgo/errors/issue13423.go
rename to misc/cgo/errors/src/issue13423.go
diff --git a/misc/cgo/errors/src/issue13467.go b/misc/cgo/errors/src/issue13467.go
new file mode 100644
index 00000000000..e061880ddab
--- /dev/null
+++ b/misc/cgo/errors/src/issue13467.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+/*
+static int transform(int x) { return x; }
+*/
+import "C"
+
+func F() {
+ var x rune = '✈'
+ var _ rune = C.transform(x) // ERROR HERE: C\.int
+}
diff --git a/misc/cgo/errors/src/issue13635.go b/misc/cgo/errors/src/issue13635.go
new file mode 100644
index 00000000000..3f38f5df4b5
--- /dev/null
+++ b/misc/cgo/errors/src/issue13635.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// issue 13635: used to output error about C.unsignedchar.
+// This test tests all such types.
+
+package pkg
+
+import "C"
+
+func main() {
+ var (
+ _ C.uchar = "uc" // ERROR HERE: C\.uchar
+ _ C.schar = "sc" // ERROR HERE: C\.schar
+ _ C.ushort = "us" // ERROR HERE: C\.ushort
+ _ C.uint = "ui" // ERROR HERE: C\.uint
+ _ C.ulong = "ul" // ERROR HERE: C\.ulong
+ _ C.longlong = "ll" // ERROR HERE: C\.longlong
+ _ C.ulonglong = "ull" // ERROR HERE: C\.ulonglong
+ _ C.complexfloat = "cf" // ERROR HERE: C\.complexfloat
+ _ C.complexdouble = "cd" // ERROR HERE: C\.complexdouble
+ )
+}
diff --git a/misc/cgo/errors/issue13830.go b/misc/cgo/errors/src/issue13830.go
similarity index 100%
rename from misc/cgo/errors/issue13830.go
rename to misc/cgo/errors/src/issue13830.go
diff --git a/misc/cgo/errors/issue14669.go b/misc/cgo/errors/src/issue14669.go
similarity index 100%
rename from misc/cgo/errors/issue14669.go
rename to misc/cgo/errors/src/issue14669.go
diff --git a/misc/cgo/errors/issue16116.go b/misc/cgo/errors/src/issue16116.go
similarity index 100%
rename from misc/cgo/errors/issue16116.go
rename to misc/cgo/errors/src/issue16116.go
diff --git a/misc/cgo/errors/issue16591.go b/misc/cgo/errors/src/issue16591.go
similarity index 100%
rename from misc/cgo/errors/issue16591.go
rename to misc/cgo/errors/src/issue16591.go
diff --git a/misc/cgo/errors/issue18452.go b/misc/cgo/errors/src/issue18452.go
similarity index 75%
rename from misc/cgo/errors/issue18452.go
rename to misc/cgo/errors/src/issue18452.go
index 36ef7f54e12..0386d768927 100644
--- a/misc/cgo/errors/issue18452.go
+++ b/misc/cgo/errors/src/issue18452.go
@@ -13,6 +13,6 @@ import (
func a() {
fmt.Println("Hello, world!")
- C.function_that_does_not_exist() // line 16
- C.pi // line 17
+ C.function_that_does_not_exist() // ERROR HERE
+ C.pi // ERROR HERE
}
diff --git a/misc/cgo/errors/issue18889.go b/misc/cgo/errors/src/issue18889.go
similarity index 100%
rename from misc/cgo/errors/issue18889.go
rename to misc/cgo/errors/src/issue18889.go
diff --git a/misc/cgo/errors/issue7757.go b/misc/cgo/errors/src/issue7757.go
similarity index 100%
rename from misc/cgo/errors/issue7757.go
rename to misc/cgo/errors/src/issue7757.go
diff --git a/misc/cgo/errors/issue8442.go b/misc/cgo/errors/src/issue8442.go
similarity index 100%
rename from misc/cgo/errors/issue8442.go
rename to misc/cgo/errors/src/issue8442.go
diff --git a/misc/cgo/errors/src/long_double_size.go b/misc/cgo/errors/src/long_double_size.go
new file mode 100644
index 00000000000..8b797f886ae
--- /dev/null
+++ b/misc/cgo/errors/src/long_double_size.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+const int sizeofLongDouble = sizeof(long double);
+*/
+import "C"
+
+import "fmt"
+
+func main() {
+ fmt.Println(C.sizeofLongDouble)
+}
diff --git a/misc/cgo/errors/malloc.go b/misc/cgo/errors/src/malloc.go
similarity index 100%
rename from misc/cgo/errors/malloc.go
rename to misc/cgo/errors/src/malloc.go
diff --git a/misc/cgo/errors/test.bash b/misc/cgo/errors/test.bash
deleted file mode 100755
index ed0b0946925..00000000000
--- a/misc/cgo/errors/test.bash
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2013 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-check() {
- file=$1
- line=$(grep -n 'ERROR HERE' $file | sed 's/:.*//')
- if [ "$line" = "" ]; then
- echo 1>&2 misc/cgo/errors/test.bash: BUG: cannot find ERROR HERE in $file
- exit 1
- fi
- expect $file $file:$line:
-}
-
-expect() {
- file=$1
- shift
- if go build -gcflags=-C $file >errs 2>&1; then
- echo 1>&2 misc/cgo/errors/test.bash: BUG: expected cgo to fail on $file but it succeeded
- exit 1
- fi
- if ! test -s errs; then
- echo 1>&2 misc/cgo/errors/test.bash: BUG: expected error output for $file but saw none
- exit 1
- fi
- for error; do
- if ! fgrep $error errs >/dev/null 2>&1; then
- echo 1>&2 misc/cgo/errors/test.bash: BUG: expected error output for $file to contain \"$error\" but saw:
- cat 1>&2 errs
- exit 1
- fi
- done
-}
-
-check err1.go
-check err2.go
-check err3.go
-check issue7757.go
-check issue8442.go
-check issue11097a.go
-check issue11097b.go
-expect issue13129.go C.ushort
-check issue13423.go
-expect issue13635.go C.uchar C.schar C.ushort C.uint C.ulong C.longlong C.ulonglong C.complexfloat C.complexdouble
-check issue13830.go
-check issue16116.go
-check issue16591.go
-check issue18889.go
-expect issue18452.go issue18452.go:16 issue18452.go:17
-
-if ! go build issue14669.go; then
- exit 1
-fi
-if ! CGO_CFLAGS="-O" go build issue14669.go; then
- exit 1
-fi
-
-if ! go run ptr.go; then
- exit 1
-fi
-
-# The malloc.go test should crash.
-rm -f malloc.out
-if go run malloc.go >malloc.out 2>&1; then
- echo '`go run malloc.go` succeeded unexpectedly'
- cat malloc.out
- rm -f malloc.out
- exit 1
-fi
-rm -f malloc.out
-
-rm -rf errs _obj
-exit 0
diff --git a/misc/cgo/life/main.go b/misc/cgo/life/main.go
index aa2f6d116b3..45376fd05a9 100644
--- a/misc/cgo/life/main.go
+++ b/misc/cgo/life/main.go
@@ -1,4 +1,4 @@
-// cmpout
+// cmpout -tags=use_go_run
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -11,9 +11,10 @@
package main
import (
- "."
"flag"
"fmt"
+
+ "."
)
const MAXDIM = 100
diff --git a/misc/cgo/stdio/chain.go b/misc/cgo/stdio/chain.go
index 03cddb76888..0fa813cab70 100644
--- a/misc/cgo/stdio/chain.go
+++ b/misc/cgo/stdio/chain.go
@@ -1,4 +1,4 @@
-// cmpout
+// cmpout -tags=use_go_run
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/misc/cgo/stdio/fib.go b/misc/cgo/stdio/fib.go
index 61a1b83728c..56e32552ee6 100644
--- a/misc/cgo/stdio/fib.go
+++ b/misc/cgo/stdio/fib.go
@@ -1,4 +1,4 @@
-// cmpout
+// cmpout -tags=use_go_run
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/misc/cgo/stdio/hello.go b/misc/cgo/stdio/hello.go
index 47179ba4827..63bff4c617a 100644
--- a/misc/cgo/stdio/hello.go
+++ b/misc/cgo/stdio/hello.go
@@ -1,4 +1,4 @@
-// cmpout
+// cmpout -tags=use_go_run
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go
index 9485e25bf47..67abfff2c03 100644
--- a/misc/cgo/test/cgo_test.go
+++ b/misc/cgo/test/cgo_test.go
@@ -80,6 +80,11 @@ func Test20369(t *testing.T) { test20369(t) }
func Test18720(t *testing.T) { test18720(t) }
func Test20266(t *testing.T) { test20266(t) }
func Test20129(t *testing.T) { test20129(t) }
+func Test20910(t *testing.T) { test20910(t) }
func Test21708(t *testing.T) { test21708(t) }
+func Test21809(t *testing.T) { test21809(t) }
+func Test6907(t *testing.T) { test6907(t) }
+func Test6907Go(t *testing.T) { test6907Go(t) }
+func Test21897(t *testing.T) { test21897(t) }
func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }
diff --git a/misc/cgo/test/issue18720.go b/misc/cgo/test/issue18720.go
index a93304498e0..3d64003be74 100644
--- a/misc/cgo/test/issue18720.go
+++ b/misc/cgo/test/issue18720.go
@@ -12,13 +12,39 @@ package cgotest
struct foo { char c; };
#define SIZE_OF(x) sizeof(x)
#define SIZE_OF_FOO SIZE_OF(struct foo)
+#define VAR1 VAR
+#define VAR var
+int var = 5;
+
+#define ADDR &var
+
+#define CALL fn()
+int fn(void) {
+ return ++var;
+}
*/
import "C"
import "testing"
func test18720(t *testing.T) {
- if C.HELLO_WORLD != "hello\000world" {
- t.Fatalf(`expected "hello\000world", but got %q`, C.HELLO_WORLD)
+ if got, want := C.HELLO_WORLD, "hello\000world"; got != want {
+ t.Errorf("C.HELLO_WORLD == %q, expected %q", got, want)
+ }
+
+ if got, want := C.VAR1, C.int(5); got != want {
+ t.Errorf("C.VAR1 == %v, expected %v", got, want)
+ }
+
+ if got, want := *C.ADDR, C.int(5); got != want {
+ t.Errorf("*C.ADDR == %v, expected %v", got, want)
+ }
+
+ if got, want := C.CALL, C.int(6); got != want {
+ t.Errorf("C.CALL == %v, expected %v", got, want)
+ }
+
+ if got, want := C.CALL, C.int(7); got != want {
+ t.Errorf("C.CALL == %v, expected %v", got, want)
}
// Issue 20125.
diff --git a/misc/cgo/test/issue19832.go b/misc/cgo/test/issue19832.go
new file mode 100644
index 00000000000..44587770af4
--- /dev/null
+++ b/misc/cgo/test/issue19832.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 19832. Functions taking a pointer typedef were being expanded and triggering a compiler error.
+
+package cgotest
+
+// typedef struct { int i; } *PS;
+// void T19832(PS p) {}
+import "C"
+import "testing"
+
+func test19832(t *testing.T) {
+ C.T19832(nil)
+}
diff --git a/misc/cgo/test/issue20910.c b/misc/cgo/test/issue20910.c
new file mode 100644
index 00000000000..e8d623fc983
--- /dev/null
+++ b/misc/cgo/test/issue20910.c
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "_cgo_export.h"
+
+/* Test calling a Go function with multiple return values. */
+
+void
+callMulti(void)
+{
+ struct multi_return result = multi();
+ assert(strcmp(result.r0, "multi") == 0);
+ assert(result.r1 == 0);
+ free(result.r0);
+}
diff --git a/misc/cgo/test/issue20910.go b/misc/cgo/test/issue20910.go
new file mode 100644
index 00000000000..69d7d9249ac
--- /dev/null
+++ b/misc/cgo/test/issue20910.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+//void callMulti(void);
+import "C"
+
+import "testing"
+
+//export multi
+func multi() (*C.char, C.int) {
+ return C.CString("multi"), 0
+}
+
+func test20910(t *testing.T) {
+ C.callMulti()
+}
diff --git a/misc/cgo/test/issue21809.go b/misc/cgo/test/issue21809.go
new file mode 100644
index 00000000000..a3a6b88897e
--- /dev/null
+++ b/misc/cgo/test/issue21809.go
@@ -0,0 +1,45 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+// Issue 21809. Compile C `typedef` to go type aliases.
+
+// typedef long MySigned_t;
+// /* tests alias-to-alias */
+// typedef MySigned_t MySigned2_t;
+//
+// long takes_long(long x) { return x * x; }
+// MySigned_t takes_typedef(MySigned_t x) { return x * x; }
+import "C"
+
+import "testing"
+
+func test21809(t *testing.T) {
+ longVar := C.long(3)
+ typedefVar := C.MySigned_t(4)
+ typedefTypedefVar := C.MySigned2_t(5)
+
+ // all three should be considered identical to `long`
+ if ret := C.takes_long(longVar); ret != 9 {
+ t.Errorf("got %v but expected %v", ret, 9)
+ }
+ if ret := C.takes_long(typedefVar); ret != 16 {
+ t.Errorf("got %v but expected %v", ret, 16)
+ }
+ if ret := C.takes_long(typedefTypedefVar); ret != 25 {
+ t.Errorf("got %v but expected %v", ret, 25)
+ }
+
+ // They should also be identical to the typedef'd type
+ if ret := C.takes_typedef(longVar); ret != 9 {
+ t.Errorf("got %v but expected %v", ret, 9)
+ }
+ if ret := C.takes_typedef(typedefVar); ret != 16 {
+ t.Errorf("got %v but expected %v", ret, 16)
+ }
+ if ret := C.takes_typedef(typedefTypedefVar); ret != 25 {
+ t.Errorf("got %v but expected %v", ret, 25)
+ }
+}
diff --git a/misc/cgo/test/issue21897.go b/misc/cgo/test/issue21897.go
new file mode 100644
index 00000000000..d13246bd84a
--- /dev/null
+++ b/misc/cgo/test/issue21897.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin,cgo,!internal
+
+package cgotest
+
+/*
+#cgo LDFLAGS: -framework CoreFoundation
+#include <CoreFoundation/CoreFoundation.h>
+*/
+import "C"
+import (
+ "runtime/debug"
+ "testing"
+ "unsafe"
+)
+
+func test21897(t *testing.T) {
+ // Please write barrier, kick in soon.
+ defer debug.SetGCPercent(debug.SetGCPercent(1))
+
+ for i := 0; i < 10000; i++ {
+ testCFNumberRef()
+ testCFDateRef()
+ testCFBooleanRef()
+ // Allocate some memory, so eventually the write barrier is enabled
+ // and it will see writes of bad pointers in the test* functions below.
+ byteSliceSink = make([]byte, 1024)
+ }
+}
+
+var byteSliceSink []byte
+
+func testCFNumberRef() {
+ var v int64 = 0
+ xCFNumberRef = C.CFNumberCreate(C.kCFAllocatorSystemDefault, C.kCFNumberSInt64Type, unsafe.Pointer(&v))
+ //fmt.Printf("CFNumberRef: %x\n", uintptr(unsafe.Pointer(xCFNumberRef)))
+}
+
+var xCFNumberRef C.CFNumberRef
+
+func testCFDateRef() {
+ xCFDateRef = C.CFDateCreate(C.kCFAllocatorSystemDefault, 0) // 0 value is 1 Jan 2001 00:00:00 GMT
+ //fmt.Printf("CFDateRef: %x\n", uintptr(unsafe.Pointer(xCFDateRef)))
+}
+
+var xCFDateRef C.CFDateRef
+
+func testCFBooleanRef() {
+ xCFBooleanRef = C.kCFBooleanFalse
+ //fmt.Printf("CFBooleanRef: %x\n", uintptr(unsafe.Pointer(xCFBooleanRef)))
+}
+
+var xCFBooleanRef C.CFBooleanRef
diff --git a/misc/cgo/test/issue21897b.go b/misc/cgo/test/issue21897b.go
new file mode 100644
index 00000000000..08b5f4d808e
--- /dev/null
+++ b/misc/cgo/test/issue21897b.go
@@ -0,0 +1,13 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin !cgo internal
+
+package cgotest
+
+import "testing"
+
+func test21897(t *testing.T) {
+ t.Skip("test runs only on darwin+cgo")
+}
diff --git a/misc/cgo/test/issue22958.go b/misc/cgo/test/issue22958.go
new file mode 100644
index 00000000000..a5f058fdae1
--- /dev/null
+++ b/misc/cgo/test/issue22958.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+// Test handling of bitfields.
+
+/*
+typedef struct {
+ unsigned long long f8 : 8;
+ unsigned long long f16 : 16;
+ unsigned long long f24 : 24;
+ unsigned long long f32 : 32;
+ unsigned long long f40 : 40;
+ unsigned long long f48 : 48;
+ unsigned long long f56 : 56;
+ unsigned long long f64 : 64;
+} issue22958Type;
+*/
+import "C"
+
+// Nothing to run, just make sure this compiles.
+var Vissue22958 C.issue22958Type
diff --git a/misc/cgo/test/issue6907.go b/misc/cgo/test/issue6907.go
new file mode 100644
index 00000000000..00495ab8e2e
--- /dev/null
+++ b/misc/cgo/test/issue6907.go
@@ -0,0 +1,33 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+/*
+#include <stdlib.h>
+#include <string.h>
+
+char* Issue6907CopyString(_GoString_ s) {
+ size_t n;
+ const char *p;
+ char *r;
+
+ n = _GoStringLen(s);
+ p = _GoStringPtr(s);
+ r = malloc(n + 1);
+ memmove(r, p, n);
+ r[n] = '\0';
+ return r;
+}
+*/
+import "C"
+
+import "testing"
+
+func test6907(t *testing.T) {
+ want := "yarn"
+ if got := C.GoString(C.Issue6907CopyString(want)); got != want {
+ t.Errorf("C.GoString(C.Issue6907CopyString(%q)) == %q, want %q", want, got, want)
+ }
+}
diff --git a/misc/cgo/test/issue6907export.go b/misc/cgo/test/issue6907export.go
new file mode 100644
index 00000000000..d41899e1a62
--- /dev/null
+++ b/misc/cgo/test/issue6907export.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+/*
+extern int CheckIssue6907C(_GoString_);
+*/
+import "C"
+
+import (
+ "testing"
+)
+
+const CString = "C string"
+
+//export CheckIssue6907Go
+func CheckIssue6907Go(s string) C.int {
+ if s == CString {
+ return 1
+ }
+ return 0
+}
+
+func test6907Go(t *testing.T) {
+ if got := C.CheckIssue6907C(CString); got != 1 {
+ t.Errorf("C.CheckIssue6907C() == %d, want %d", got, 1)
+ }
+}
diff --git a/misc/cgo/test/issue6907export_c.c b/misc/cgo/test/issue6907export_c.c
new file mode 100644
index 00000000000..9b1a4fc630b
--- /dev/null
+++ b/misc/cgo/test/issue6907export_c.c
@@ -0,0 +1,11 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <string.h>
+
+#include "_cgo_export.h"
+
+int CheckIssue6907C(_GoString_ s) {
+ return CheckIssue6907Go(s);
+}
diff --git a/misc/cgo/test/issue7978.go b/misc/cgo/test/issue7978.go
index 7fb62e807ba..b057e3eacb2 100644
--- a/misc/cgo/test/issue7978.go
+++ b/misc/cgo/test/issue7978.go
@@ -44,8 +44,8 @@ static void issue7978c(uint32_t *sync) {
import "C"
import (
- "os"
"runtime"
+ "runtime/debug"
"strings"
"sync/atomic"
"testing"
@@ -114,12 +114,7 @@ func test7978(t *testing.T) {
if C.HAS_SYNC_FETCH_AND_ADD == 0 {
t.Skip("clang required for __sync_fetch_and_add support on darwin/arm")
}
- if runtime.GOOS == "android" || runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
- t.Skip("GOTRACEBACK is not passed on to the exec wrapper")
- }
- if os.Getenv("GOTRACEBACK") != "2" {
- t.Fatalf("GOTRACEBACK must be 2")
- }
+ debug.SetTraceback("2")
issue7978sync = 0
go issue7978go()
// test in c code, before callback
diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go
index 74897c7f6f1..7ba5faabeb3 100644
--- a/misc/cgo/testcarchive/carchive_test.go
+++ b/misc/cgo/testcarchive/carchive_test.go
@@ -6,6 +6,7 @@ package carchive_test
import (
"bufio"
+ "bytes"
"debug/elf"
"fmt"
"io/ioutil"
@@ -134,8 +135,10 @@ func cmdToRun(name string) []string {
}
func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
+ t.Helper()
cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
cmd.Env = gopathEnv
+ t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
t.Fatal(err)
@@ -171,7 +174,7 @@ func TestInstall(t *testing.T) {
testInstall(t, "./testp1"+exeSuffix,
filepath.Join("pkg", libgodir, "libgo.a"),
filepath.Join("pkg", libgodir, "libgo.h"),
- "go", "install", "-buildmode=c-archive", "libgo")
+ "go", "install", "-i", "-buildmode=c-archive", "libgo")
// Test building libgo other than installing it.
// Header files are now present.
@@ -488,7 +491,7 @@ func TestPIE(t *testing.T) {
os.RemoveAll("pkg")
}()
- cmd := exec.Command("go", "install", "-buildmode=c-archive", "libgo")
+ cmd := exec.Command("go", "install", "-i", "-buildmode=c-archive", "libgo")
cmd.Env = gopathEnv
if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out)
@@ -549,6 +552,8 @@ func TestSIGPROF(t *testing.T) {
switch GOOS {
case "windows", "plan9":
t.Skipf("skipping SIGPROF test on %s", GOOS)
+ case "darwin":
+ t.Skipf("skipping SIGPROF test on %s; see https://golang.org/issue/19320", GOOS)
}
t.Parallel()
@@ -605,9 +610,26 @@ func TestCompileWithoutShared(t *testing.T) {
}
exe := "./testnoshared" + exeSuffix
- ccArgs := append(cc, "-o", exe, "main5.c", "libgo2.a")
+
+ // In some cases, -no-pie is needed here, but not accepted everywhere. First try
+ // if -no-pie is accepted. See #22126.
+ ccArgs := append(cc, "-o", exe, "-no-pie", "main5.c", "libgo2.a")
t.Log(ccArgs)
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+
+ // If -no-pie unrecognized, try -nopie if this is possibly clang
+ if err != nil && bytes.Contains(out, []byte("unknown")) && !strings.Contains(cc[0], "gcc") {
+ ccArgs = append(cc, "-o", exe, "-nopie", "main5.c", "libgo2.a")
+ t.Log(ccArgs)
+ out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ }
+
+ // Don't use either -no-pie or -nopie
+ if err != nil && bytes.Contains(out, []byte("unrecognized")) {
+ ccArgs := append(cc, "-o", exe, "main5.c", "libgo2.a")
+ t.Log(ccArgs)
+ out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ }
t.Logf("%s", out)
if err != nil {
t.Fatal(err)
diff --git a/misc/cgo/testcshared/cshared_test.go b/misc/cgo/testcshared/cshared_test.go
new file mode 100644
index 00000000000..49be0923966
--- /dev/null
+++ b/misc/cgo/testcshared/cshared_test.go
@@ -0,0 +1,479 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cshared_test
+
+import (
+ "debug/elf"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "testing"
+ "unicode"
+)
+
+// C compiler with args (from $(go env CC) $(go env GOGCCFLAGS)).
+var cc []string
+
+// An environment with GOPATH=$(pwd).
+var gopathEnv []string
+
+// ".exe" on Windows.
+var exeSuffix string
+
+var GOOS, GOARCH, GOROOT string
+var installdir, androiddir string
+var libSuffix, libgoname string
+
+func TestMain(m *testing.M) {
+ GOOS = goEnv("GOOS")
+ GOARCH = goEnv("GOARCH")
+ GOROOT = goEnv("GOROOT")
+
+ if _, err := os.Stat(GOROOT); os.IsNotExist(err) {
+ log.Fatalf("Unable to find GOROOT at '%s'", GOROOT)
+ }
+
+ // Directory where cgo headers and outputs will be installed.
+ // The installation directory format varies depending on the platform.
+ installdir = path.Join("pkg", fmt.Sprintf("%s_%s_testcshared", GOOS, GOARCH))
+ switch GOOS {
+ case "darwin":
+ libSuffix = "dylib"
+ case "windows":
+ libSuffix = "dll"
+ default:
+ libSuffix = "so"
+ installdir = path.Join("pkg", fmt.Sprintf("%s_%s_testcshared_shared", GOOS, GOARCH))
+ }
+
+ androiddir = fmt.Sprintf("/data/local/tmp/testcshared-%d", os.Getpid())
+ if GOOS == "android" {
+ cmd := exec.Command("adb", "shell", "mkdir", "-p", androiddir)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ log.Fatalf("setupAndroid failed: %v\n%s\n", err, out)
+ }
+ }
+
+ libgoname = "libgo." + libSuffix
+
+ cc = []string{goEnv("CC")}
+
+ out := goEnv("GOGCCFLAGS")
+ quote := '\000'
+ start := 0
+ lastSpace := true
+ backslash := false
+ s := string(out)
+ for i, c := range s {
+ if quote == '\000' && unicode.IsSpace(c) {
+ if !lastSpace {
+ cc = append(cc, s[start:i])
+ lastSpace = true
+ }
+ } else {
+ if lastSpace {
+ start = i
+ lastSpace = false
+ }
+ if quote == '\000' && !backslash && (c == '"' || c == '\'') {
+ quote = c
+ backslash = false
+ } else if !backslash && quote == c {
+ quote = '\000'
+ } else if (quote == '\000' || quote == '"') && !backslash && c == '\\' {
+ backslash = true
+ } else {
+ backslash = false
+ }
+ }
+ }
+ if !lastSpace {
+ cc = append(cc, s[start:])
+ }
+
+ switch GOOS {
+ case "darwin":
+ // For Darwin/ARM.
+ // TODO(crawshaw): can we do better?
+ cc = append(cc, []string{"-framework", "CoreFoundation", "-framework", "Foundation"}...)
+ case "android":
+ cc = append(cc, "-pie", "-fuse-ld=gold")
+ }
+ libgodir := GOOS + "_" + GOARCH
+ switch GOOS {
+ case "darwin":
+ if GOARCH == "arm" || GOARCH == "arm64" {
+ libgodir += "_shared"
+ }
+ case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris":
+ libgodir += "_shared"
+ }
+ cc = append(cc, "-I", filepath.Join("pkg", libgodir))
+
+ // Build an environment with GOPATH=$(pwd)
+ dir, err := os.Getwd()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+ gopathEnv = append(os.Environ(), "GOPATH="+dir)
+
+ if GOOS == "windows" {
+ exeSuffix = ".exe"
+ }
+
+ st := m.Run()
+
+ os.Remove(libgoname)
+ os.RemoveAll("pkg")
+ cleanupHeaders()
+ cleanupAndroid()
+
+ os.Exit(st)
+}
+
+func goEnv(key string) string {
+ out, err := exec.Command("go", "env", key).Output()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "go env %s failed:\n%s", key, err)
+ fmt.Fprintf(os.Stderr, "%s", err.(*exec.ExitError).Stderr)
+ os.Exit(2)
+ }
+ return strings.TrimSpace(string(out))
+}
+
+func cmdToRun(name string) string {
+ return "./" + name + exeSuffix
+}
+
+func adbPush(t *testing.T, filename string) {
+ if GOOS != "android" {
+ return
+ }
+ args := []string{"adb", "push", filename, fmt.Sprintf("%s/%s", androiddir, filename)}
+ cmd := exec.Command(args[0], args[1:]...)
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Fatalf("adb command failed: %v\n%s\n", err, out)
+ }
+}
+
+func adbRun(t *testing.T, env []string, adbargs ...string) string {
+ if GOOS != "android" {
+ t.Fatalf("trying to run adb command when operating system is not android.")
+ }
+ args := []string{"adb", "shell"}
+ // Propagate LD_LIBRARY_PATH to the adb shell invocation.
+ for _, e := range env {
+ if strings.Index(e, "LD_LIBRARY_PATH=") != -1 {
+ adbargs = append([]string{e}, adbargs...)
+ break
+ }
+ }
+ shellcmd := fmt.Sprintf("cd %s; %s", androiddir, strings.Join(adbargs, " "))
+ args = append(args, shellcmd)
+ cmd := exec.Command(args[0], args[1:]...)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("adb command failed: %v\n%s\n", err, out)
+ }
+ return strings.Replace(string(out), "\r", "", -1)
+}
+
+func run(t *testing.T, env []string, args ...string) string {
+ t.Helper()
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Env = env
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("command failed: %v\n%v\n%s\n", args, err, out)
+ } else {
+ t.Logf("run: %v", args)
+ }
+ return string(out)
+}
+
+func runExe(t *testing.T, env []string, args ...string) string {
+ t.Helper()
+ if GOOS == "android" {
+ return adbRun(t, env, args...)
+ }
+ return run(t, env, args...)
+}
+
+func runCC(t *testing.T, args ...string) string {
+ t.Helper()
+ // This function is run in parallel, so append to a copy of cc
+ // rather than cc itself.
+ return run(t, nil, append(append([]string(nil), cc...), args...)...)
+}
+
+func createHeaders() error {
+ args := []string{"go", "install", "-i", "-buildmode=c-shared",
+ "-installsuffix", "testcshared", "libgo"}
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Env = gopathEnv
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("command failed: %v\n%v\n%s\n", args, err, out)
+ }
+
+ args = []string{"go", "build", "-buildmode=c-shared",
+ "-installsuffix", "testcshared",
+ "-o", libgoname,
+ filepath.Join("src", "libgo", "libgo.go")}
+ cmd = exec.Command(args[0], args[1:]...)
+ cmd.Env = gopathEnv
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("command failed: %v\n%v\n%s\n", args, err, out)
+ }
+
+ if GOOS == "android" {
+ args = []string{"adb", "push", libgoname, fmt.Sprintf("%s/%s", androiddir, libgoname)}
+ cmd = exec.Command(args[0], args[1:]...)
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("adb command failed: %v\n%s\n", err, out)
+ }
+ }
+
+ return nil
+}
+
+var (
+ headersOnce sync.Once
+ headersErr error
+)
+
+func createHeadersOnce(t *testing.T) {
+ headersOnce.Do(func() {
+ headersErr = createHeaders()
+ })
+ if headersErr != nil {
+ t.Fatal(headersErr)
+ }
+}
+
+func cleanupHeaders() {
+ os.Remove("libgo.h")
+}
+
+func cleanupAndroid() {
+ if GOOS != "android" {
+ return
+ }
+ cmd := exec.Command("adb", "shell", "rm", "-rf", androiddir)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ log.Fatalf("cleanupAndroid failed: %v\n%s\n", err, out)
+ }
+}
+
+// test0: exported symbols in shared lib are accessible.
+func TestExportedSymbols(t *testing.T) {
+ t.Parallel()
+
+ cmd := "testp0"
+ bin := cmdToRun(cmd)
+
+ createHeadersOnce(t)
+
+ runCC(t, "-I", installdir, "-o", cmd, "main0.c", libgoname)
+ adbPush(t, cmd)
+
+ defer os.Remove(bin)
+
+ out := runExe(t, append(gopathEnv, "LD_LIBRARY_PATH=."), bin)
+ if strings.TrimSpace(out) != "PASS" {
+ t.Error(out)
+ }
+}
+
+// test1: shared library can be dynamically loaded and exported symbols are accessible.
+func TestExportedSymbolsWithDynamicLoad(t *testing.T) {
+ t.Parallel()
+
+ if GOOS == "windows" {
+ t.Logf("Skipping on %s", GOOS)
+ return
+ }
+
+ cmd := "testp1"
+ bin := cmdToRun(cmd)
+
+ createHeadersOnce(t)
+
+ runCC(t, "-o", cmd, "main1.c", "-ldl")
+ adbPush(t, cmd)
+
+ defer os.Remove(bin)
+
+ out := runExe(t, nil, bin, "./"+libgoname)
+ if strings.TrimSpace(out) != "PASS" {
+ t.Error(out)
+ }
+}
+
+// test2: tests libgo2 which does not export any functions.
+func TestUnexportedSymbols(t *testing.T) {
+ t.Parallel()
+
+ if GOOS == "windows" {
+ t.Logf("Skipping on %s", GOOS)
+ return
+ }
+
+ cmd := "testp2"
+ bin := cmdToRun(cmd)
+ libname := "libgo2." + libSuffix
+
+ run(t,
+ gopathEnv,
+ "go", "build",
+ "-buildmode=c-shared",
+ "-installsuffix", "testcshared",
+ "-o", libname, "libgo2",
+ )
+ adbPush(t, libname)
+
+ linkFlags := "-Wl,--no-as-needed"
+ if GOOS == "darwin" {
+ linkFlags = ""
+ }
+
+ runCC(t, "-o", cmd, "main2.c", linkFlags, libname)
+ adbPush(t, cmd)
+
+ defer os.Remove(libname)
+ defer os.Remove(bin)
+
+ out := runExe(t, append(gopathEnv, "LD_LIBRARY_PATH=."), bin)
+
+ if strings.TrimSpace(out) != "PASS" {
+ t.Error(out)
+ }
+}
+
+// test3: tests main.main is exported on android.
+func TestMainExportedOnAndroid(t *testing.T) {
+ t.Parallel()
+
+ switch GOOS {
+ case "android":
+ break
+ default:
+ t.Logf("Skipping on %s", GOOS)
+ return
+ }
+
+ cmd := "testp3"
+ bin := cmdToRun(cmd)
+
+ createHeadersOnce(t)
+
+ runCC(t, "-o", cmd, "main3.c", "-ldl")
+ adbPush(t, cmd)
+
+ defer os.Remove(bin)
+
+ out := runExe(t, nil, bin, "./"+libgoname)
+ if strings.TrimSpace(out) != "PASS" {
+ t.Error(out)
+ }
+}
+
+func testSignalHandlers(t *testing.T, pkgname, cfile, cmd string) {
+ libname := pkgname + "." + libSuffix
+ run(t,
+ gopathEnv,
+ "go", "build",
+ "-buildmode=c-shared",
+ "-installsuffix", "testcshared",
+ "-o", libname, pkgname,
+ )
+ adbPush(t, libname)
+ runCC(t, "-pthread", "-o", cmd, cfile, "-ldl")
+ adbPush(t, cmd)
+
+ bin := cmdToRun(cmd)
+
+ defer os.Remove(libname)
+ defer os.Remove(bin)
+ defer os.Remove(pkgname + ".h")
+
+ out := runExe(t, nil, bin, "./"+libname)
+ if strings.TrimSpace(out) != "PASS" {
+ t.Error(run(t, nil, bin, libname, "verbose"))
+ }
+}
+
+// test4: test signal handlers
+func TestSignalHandlers(t *testing.T) {
+ t.Parallel()
+ if GOOS == "windows" {
+ t.Logf("Skipping on %s", GOOS)
+ return
+ }
+ testSignalHandlers(t, "libgo4", "main4.c", "testp4")
+}
+
+// test5: test signal handlers with os/signal.Notify
+func TestSignalHandlersWithNotify(t *testing.T) {
+ t.Parallel()
+ if GOOS == "windows" {
+ t.Logf("Skipping on %s", GOOS)
+ return
+ }
+ testSignalHandlers(t, "libgo5", "main5.c", "testp5")
+}
+
+func TestPIE(t *testing.T) {
+ t.Parallel()
+
+ switch GOOS {
+ case "linux", "android":
+ break
+ default:
+ t.Logf("Skipping on %s", GOOS)
+ return
+ }
+
+ createHeadersOnce(t)
+
+ f, err := elf.Open(libgoname)
+ if err != nil {
+ t.Fatalf("elf.Open failed: %v", err)
+ }
+ defer f.Close()
+
+ ds := f.SectionByType(elf.SHT_DYNAMIC)
+ if ds == nil {
+ t.Fatalf("no SHT_DYNAMIC section")
+ }
+ d, err := ds.Data()
+ if err != nil {
+ t.Fatalf("can't read SHT_DYNAMIC contents: %v", err)
+ }
+ for len(d) > 0 {
+ var tag elf.DynTag
+ switch f.Class {
+ case elf.ELFCLASS32:
+ tag = elf.DynTag(f.ByteOrder.Uint32(d[:4]))
+ d = d[8:]
+ case elf.ELFCLASS64:
+ tag = elf.DynTag(f.ByteOrder.Uint64(d[:8]))
+ d = d[16:]
+ }
+ if tag == elf.DT_TEXTREL {
+ t.Fatalf("%s has DT_TEXTREL flag", libgoname)
+ }
+ }
+}
diff --git a/misc/cgo/testcshared/src/p/p.go b/misc/cgo/testcshared/src/p/p.go
index fb4b5ca8d1a..0f02cf3ce6c 100644
--- a/misc/cgo/testcshared/src/p/p.go
+++ b/misc/cgo/testcshared/src/p/p.go
@@ -8,5 +8,6 @@ import "C"
//export FromPkg
func FromPkg() int32 { return 1024 }
+
//export Divu
func Divu(a, b uint32) uint32 { return a / b }
diff --git a/misc/cgo/testcshared/test.bash b/misc/cgo/testcshared/test.bash
deleted file mode 100755
index 315a0d40367..00000000000
--- a/misc/cgo/testcshared/test.bash
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2015 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# For testing Android, this script requires adb to push and run compiled
-# binaries on a target device.
-
-set -e
-
-if [ ! -f src/libgo/libgo.go ]; then
- cwd=$(pwd)
- echo "misc/cgo/testcshared/test.bash is running in $cwd" 1>&2
- exit 1
-fi
-
-goos=$(go env GOOS)
-goarch=$(go env GOARCH)
-goroot=$(go env GOROOT)
-if [ ! -d "$goroot" ]; then
- echo 'misc/cgo/testcshared/test.bash cannot find GOROOT' 1>&2
- echo '$GOROOT:' "$GOROOT" 1>&2
- echo 'go env GOROOT:' "$goroot" 1>&2
- exit 1
-fi
-
-# Directory where cgo headers and outputs will be installed.
-# The installation directory format varies depending on the platform.
-installdir=pkg/${goos}_${goarch}_testcshared_shared
-if [ "${goos}" = "darwin" ]; then
- installdir=pkg/${goos}_${goarch}_testcshared
-fi
-
-# Temporary directory on the android device.
-androidpath=/data/local/tmp/testcshared-$$
-
-function cleanup() {
- rm -f libgo.$libext libgo2.$libext libgo4.$libext libgo5.$libext
- rm -f libgo.h libgo4.h libgo5.h
- rm -f testp testp2 testp3 testp4 testp5
- rm -rf pkg "${goroot}/${installdir}"
-
- if [ "$goos" = "android" ]; then
- adb shell rm -rf "$androidpath"
- fi
-}
-trap cleanup EXIT
-
-if [ "$goos" = "android" ]; then
- adb shell mkdir -p "$androidpath"
-fi
-
-function run() {
- case "$goos" in
- "android")
- local args=$@
- output=$(adb shell "cd ${androidpath}; $@")
- output=$(echo $output|tr -d '\r')
- case $output in
- *PASS) echo "PASS";;
- *) echo "$output";;
- esac
- ;;
- *)
- echo $(env $@)
- ;;
- esac
-}
-
-function binpush() {
- bin=${1}
- if [ "$goos" = "android" ]; then
- adb push "$bin" "${androidpath}/${bin}" 2>/dev/null
- fi
-}
-
-rm -rf pkg
-
-suffix="-installsuffix testcshared"
-
-libext="so"
-if [ "$goos" = "darwin" ]; then
- libext="dylib"
-fi
-
-# Create the header files.
-GOPATH=$(pwd) go install -buildmode=c-shared $suffix libgo
-
-GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go
-binpush libgo.$libext
-
-if [ "$goos" = "linux" ] || [ "$goos" = "android" ] ; then
- if readelf -d libgo.$libext | grep TEXTREL >/dev/null; then
- echo "libgo.$libext has TEXTREL set"
- exit 1
- fi
-fi
-
-GOGCCFLAGS=$(go env GOGCCFLAGS)
-if [ "$goos" = "android" ]; then
- GOGCCFLAGS="${GOGCCFLAGS} -pie -fuse-ld=gold"
-fi
-
-status=0
-
-# test0: exported symbols in shared lib are accessible.
-# TODO(iant): using _shared here shouldn't really be necessary.
-$(go env CC) ${GOGCCFLAGS} -I ${installdir} -o testp main0.c ./libgo.$libext
-binpush testp
-
-output=$(run LD_LIBRARY_PATH=. ./testp)
-if [ "$output" != "PASS" ]; then
- echo "FAIL test0 got ${output}"
- status=1
-fi
-
-# test1: shared library can be dynamically loaded and exported symbols are accessible.
-$(go env CC) ${GOGCCFLAGS} -o testp main1.c -ldl
-binpush testp
-output=$(run ./testp ./libgo.$libext)
-if [ "$output" != "PASS" ]; then
- echo "FAIL test1 got ${output}"
- status=1
-fi
-
-# test2: tests libgo2 which does not export any functions.
-GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo2.$libext libgo2
-binpush libgo2.$libext
-linkflags="-Wl,--no-as-needed"
-if [ "$goos" = "darwin" ]; then
- linkflags=""
-fi
-$(go env CC) ${GOGCCFLAGS} -o testp2 main2.c $linkflags libgo2.$libext
-binpush testp2
-output=$(run LD_LIBRARY_PATH=. ./testp2)
-if [ "$output" != "PASS" ]; then
- echo "FAIL test2 got ${output}"
- status=1
-fi
-
-# test3: tests main.main is exported on android.
-if [ "$goos" = "android" ]; then
- $(go env CC) ${GOGCCFLAGS} -o testp3 main3.c -ldl
- binpush testp3
- output=$(run ./testp ./libgo.so)
- if [ "$output" != "PASS" ]; then
- echo "FAIL test3 got ${output}"
- status=1
- fi
-fi
-
-# test4: tests signal handlers
-GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo4.$libext libgo4
-binpush libgo4.$libext
-$(go env CC) ${GOGCCFLAGS} -pthread -o testp4 main4.c -ldl
-binpush testp4
-output=$(run ./testp4 ./libgo4.$libext 2>&1)
-if test "$output" != "PASS"; then
- echo "FAIL test4 got ${output}"
- if test "$goos" != "android"; then
- echo "re-running test4 in verbose mode"
- ./testp4 ./libgo4.$libext verbose
- fi
- status=1
-fi
-
-# test5: tests signal handlers with os/signal.Notify
-GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo5.$libext libgo5
-binpush libgo5.$libext
-$(go env CC) ${GOGCCFLAGS} -pthread -o testp5 main5.c -ldl
-binpush testp5
-output=$(run ./testp5 ./libgo5.$libext 2>&1)
-if test "$output" != "PASS"; then
- echo "FAIL test5 got ${output}"
- if test "$goos" != "android"; then
- echo "re-running test5 in verbose mode"
- ./testp5 ./libgo5.$libext verbose
- fi
- status=1
-fi
-
-if test "$libext" = "dylib"; then
- # make sure dylibs are well-formed
- if ! otool -l libgo*.dylib >/dev/null; then
- status=1
- fi
-fi
-
-if test $status = 0; then
- echo "ok"
-fi
-
-exit $status
diff --git a/misc/cgo/testplugin/src/host/host.go b/misc/cgo/testplugin/src/host/host.go
index 898f44efa15..0ca17da3def 100644
--- a/misc/cgo/testplugin/src/host/host.go
+++ b/misc/cgo/testplugin/src/host/host.go
@@ -126,14 +126,24 @@ func main() {
log.Fatalf(`plugin1.F()=%d, want 17`, gotf)
}
- // plugin2 has no exported symbols, only an init function.
- if _, err := plugin.Open("plugin2.so"); err != nil {
+ p2, err := plugin.Open("plugin2.so")
+ if err != nil {
log.Fatalf("plugin.Open failed: %v", err)
}
+ // Check that plugin2's init function was called, and
+ // that it modifies the same global variable as the host.
if got, want := common.X, 2; got != want {
log.Fatalf("after loading plugin2, common.X=%d, want %d", got, want)
}
+ _, err = plugin.Open("plugin2-dup.so")
+ if err == nil {
+ log.Fatal(`plugin.Open("plugin2-dup.so"): duplicate open should have failed`)
+ }
+ if s := err.Error(); !strings.Contains(s, "already loaded") {
+ log.Fatal(`plugin.Open("plugin2.so"): error does not mention "already loaded"`)
+ }
+
_, err = plugin.Open("plugin-mismatch.so")
if err == nil {
log.Fatal(`plugin.Open("plugin-mismatch.so"): should have failed`)
@@ -142,6 +152,24 @@ func main() {
log.Fatalf(`plugin.Open("plugin-mismatch.so"): error does not mention "different version": %v`, s)
}
+ _, err = plugin.Open("plugin2-dup.so")
+ if err == nil {
+ log.Fatal(`plugin.Open("plugin2-dup.so"): duplicate open after bad plugin should have failed`)
+ }
+ _, err = plugin.Open("plugin2.so")
+ if err != nil {
+ log.Fatalf(`plugin.Open("plugin2.so"): second open with same name failed: %v`, err)
+ }
+
+ // Test that unexported types with the same names in
+ // different plugins do not interfere with each other.
+ //
+ // See Issue #21386.
+ UnexportedNameReuse, _ := p.Lookup("UnexportedNameReuse")
+ UnexportedNameReuse.(func())()
+ UnexportedNameReuse, _ = p2.Lookup("UnexportedNameReuse")
+ UnexportedNameReuse.(func())()
+
testUnnamed()
fmt.Println("PASS")
diff --git a/misc/cgo/testplugin/src/issue18584/main.go b/misc/cgo/testplugin/src/issue18584/main.go
new file mode 100644
index 00000000000..c280fd46203
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue18584/main.go
@@ -0,0 +1,23 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "plugin"
+
+func main() {
+ p, err := plugin.Open("plugin.so")
+ if err != nil {
+ panic(err)
+ }
+
+ sym, err := p.Lookup("G")
+ if err != nil {
+ panic(err)
+ }
+ g := sym.(func() bool)
+ if !g() {
+ panic("expected types to match, Issue #18584")
+ }
+}
diff --git a/misc/cgo/testplugin/src/issue18584/plugin.go b/misc/cgo/testplugin/src/issue18584/plugin.go
new file mode 100644
index 00000000000..be0868d3752
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue18584/plugin.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "reflect"
+
+type C struct {
+}
+
+func F(c *C) *C {
+ return nil
+}
+
+func G() bool {
+ var c *C
+ return reflect.TypeOf(F).Out(0) == reflect.TypeOf(c)
+}
diff --git a/misc/cgo/testplugin/src/issue19418/main.go b/misc/cgo/testplugin/src/issue19418/main.go
new file mode 100644
index 00000000000..2ec9f9aaaa2
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue19418/main.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "plugin"
+)
+
+func main() {
+ p, err := plugin.Open("plugin.so")
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := p.Lookup("Val")
+ if err != nil {
+ panic(err)
+ }
+ got := *val.(*string)
+ const want = "linkstr"
+ if got != want {
+ fmt.Fprintf(os.Stderr, "issue19418 value is %q, want %q\n", got, want)
+ os.Exit(2)
+ }
+}
diff --git a/src/internal/cpu/cpu_ppc64.go b/misc/cgo/testplugin/src/issue19418/plugin.go
similarity index 80%
rename from src/internal/cpu/cpu_ppc64.go
rename to misc/cgo/testplugin/src/issue19418/plugin.go
index 5b151508479..fe93b161431 100644
--- a/src/internal/cpu/cpu_ppc64.go
+++ b/misc/cgo/testplugin/src/issue19418/plugin.go
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package cpu
+package main
-const CacheLineSize = 128
+var Val = "val-unset"
diff --git a/misc/cgo/testplugin/src/issue19529/plugin.go b/misc/cgo/testplugin/src/issue19529/plugin.go
new file mode 100644
index 00000000000..ad2df6cc7c7
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue19529/plugin.go
@@ -0,0 +1,15 @@
+package main
+
+import (
+ "reflect"
+)
+
+type Foo struct {
+ Bar string `json:"Bar@baz,omitempty"`
+}
+
+func F() {
+ println(reflect.TypeOf(Foo{}).Field(0).Tag)
+}
+
+func main() {}
diff --git a/misc/cgo/testplugin/src/issue22175/main.go b/misc/cgo/testplugin/src/issue22175/main.go
new file mode 100644
index 00000000000..9be9bab9dc3
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue22175/main.go
@@ -0,0 +1,28 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "plugin"
+)
+
+func main() {
+ p2, err := plugin.Open("issue22175_plugin1.so")
+ if err != nil {
+ panic(err)
+ }
+ f, err := p2.Lookup("F")
+ if err != nil {
+ panic(err)
+ }
+ got := f.(func() int)()
+ const want = 971
+ if got != want {
+ fmt.Fprintf(os.Stderr, "issue22175: F()=%d, want %d", got, want)
+ os.Exit(1)
+ }
+}
diff --git a/misc/cgo/testplugin/src/issue22175/plugin1.go b/misc/cgo/testplugin/src/issue22175/plugin1.go
new file mode 100644
index 00000000000..5ae6cb631e7
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue22175/plugin1.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "plugin"
+
+func F() int {
+ p2, err := plugin.Open("issue22175_plugin2.so")
+ if err != nil {
+ panic(err)
+ }
+ g, err := p2.Lookup("G")
+ if err != nil {
+ panic(err)
+ }
+ return g.(func() int)()
+}
+
+func main() {}
diff --git a/misc/cgo/testplugin/src/issue22175/plugin2.go b/misc/cgo/testplugin/src/issue22175/plugin2.go
new file mode 100644
index 00000000000..f387a192e67
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue22175/plugin2.go
@@ -0,0 +1,9 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func G() int { return 971 }
+
+func main() {}
diff --git a/misc/cgo/testplugin/src/issue22295.pkg/main.go b/misc/cgo/testplugin/src/issue22295.pkg/main.go
new file mode 100644
index 00000000000..6cb186e1003
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue22295.pkg/main.go
@@ -0,0 +1,28 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "log"
+ "plugin"
+)
+
+func main() {
+ p, err := plugin.Open("issue.22295.so")
+ if err != nil {
+ log.Fatal(err)
+ }
+ f, err := p.Lookup("F")
+ if err != nil {
+ log.Fatal(err)
+ }
+ const want = 2503
+ got := f.(func() int)()
+ if got != want {
+ log.Fatalf("got %d, want %d", got, want)
+ }
+}
diff --git a/misc/cgo/testplugin/src/issue22295.pkg/plugin.go b/misc/cgo/testplugin/src/issue22295.pkg/plugin.go
new file mode 100644
index 00000000000..46b08a405bc
--- /dev/null
+++ b/misc/cgo/testplugin/src/issue22295.pkg/plugin.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+var f *int
+
+func init() {
+ f = new(int)
+ *f = 2503
+}
+
+func F() int { return *f }
+
+func main() {}
diff --git a/misc/cgo/testplugin/src/plugin1/plugin1.go b/misc/cgo/testplugin/src/plugin1/plugin1.go
index edcef2c77e9..0a9fa2f2c1f 100644
--- a/misc/cgo/testplugin/src/plugin1/plugin1.go
+++ b/misc/cgo/testplugin/src/plugin1/plugin1.go
@@ -7,7 +7,10 @@ package main
// // No C code required.
import "C"
-import "common"
+import (
+ "common"
+ "reflect"
+)
func F() int {
_ = make([]byte, 1<<21) // trigger stack unwind, Issue #18190.
@@ -33,6 +36,21 @@ func init() {
call(g)
}
+type sameNameReusedInPlugins struct {
+ X string
+}
+
+type sameNameHolder struct {
+ F *sameNameReusedInPlugins
+}
+
+func UnexportedNameReuse() {
+ h := sameNameHolder{}
+ v := reflect.ValueOf(&h).Elem().Field(0)
+ newval := reflect.New(v.Type().Elem())
+ v.Set(newval)
+}
+
func main() {
panic("plugin1.main called")
}
diff --git a/misc/cgo/testplugin/src/plugin2/plugin2.go b/misc/cgo/testplugin/src/plugin2/plugin2.go
index 9c507fc3658..a67f2de27a7 100644
--- a/misc/cgo/testplugin/src/plugin2/plugin2.go
+++ b/misc/cgo/testplugin/src/plugin2/plugin2.go
@@ -13,6 +13,7 @@ import "C"
import (
"common"
+ "reflect"
"strings"
)
@@ -22,6 +23,21 @@ func init() {
common.X = 2
}
+type sameNameReusedInPlugins struct {
+ X string
+}
+
+type sameNameHolder struct {
+ F *sameNameReusedInPlugins
+}
+
+func UnexportedNameReuse() {
+ h := sameNameHolder{}
+ v := reflect.ValueOf(&h).Elem().Field(0)
+ newval := reflect.New(v.Type().Elem())
+ v.Set(newval)
+}
+
func main() {
panic("plugin1.main called")
}
diff --git a/misc/cgo/testplugin/test.bash b/misc/cgo/testplugin/test.bash
index 69df5bd2bfa..5ef87625f1a 100755
--- a/misc/cgo/testplugin/test.bash
+++ b/misc/cgo/testplugin/test.bash
@@ -14,39 +14,77 @@ fi
goos=$(go env GOOS)
goarch=$(go env GOARCH)
+echo SKIP: golang.org/issue/22571.
+exit 0
+
function cleanup() {
- rm -f plugin*.so unnamed*.so iface*.so
- rm -rf host pkg sub iface issue18676 issue19534
+ rm -f plugin*.so unnamed*.so iface*.so issue*
+ rm -rf host pkg sub iface
}
trap cleanup EXIT
rm -rf pkg sub
mkdir sub
-GOPATH=$(pwd) go build -buildmode=plugin plugin1
-GOPATH=$(pwd) go build -buildmode=plugin plugin2
-GOPATH=$(pwd)/altpath go build -buildmode=plugin plugin-mismatch
-GOPATH=$(pwd) go build -buildmode=plugin -o=sub/plugin1.so sub/plugin1
-GOPATH=$(pwd) go build -buildmode=plugin unnamed1.go
-GOPATH=$(pwd) go build -buildmode=plugin unnamed2.go
-GOPATH=$(pwd) go build host
+GOPATH=$(pwd) go build -i -gcflags "$GO_GCFLAGS" -buildmode=plugin plugin1
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin plugin2
+cp plugin2.so plugin2-dup.so
+GOPATH=$(pwd)/altpath go build -gcflags "$GO_GCFLAGS" -buildmode=plugin plugin-mismatch
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o=sub/plugin1.so sub/plugin1
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o=unnamed1.so unnamed1/main.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o=unnamed2.so unnamed2/main.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" host
LD_LIBRARY_PATH=$(pwd) ./host
# Test that types and itabs get properly uniqified.
-GOPATH=$(pwd) go build -buildmode=plugin iface_a
-GOPATH=$(pwd) go build -buildmode=plugin iface_b
-GOPATH=$(pwd) go build iface
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin iface_a
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin iface_b
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" iface
LD_LIBRARY_PATH=$(pwd) ./iface
+function _timeout() (
+ set -e
+ $2 &
+ p=$!
+ (sleep $1; kill $p 2>/dev/null) &
+ p2=$!
+ wait $p 2>/dev/null
+ kill -0 $p2 2>/dev/null
+)
+
# Test for issue 18676 - make sure we don't add the same itab twice.
# The buggy code hangs forever, so use a timeout to check for that.
-GOPATH=$(pwd) go build -buildmode=plugin -o plugin.so src/issue18676/plugin.go
-GOPATH=$(pwd) go build -o issue18676 src/issue18676/main.go
-timeout 10s ./issue18676
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o plugin.so src/issue18676/plugin.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue18676 src/issue18676/main.go
+_timeout 10s ./issue18676
# Test for issue 19534 - that we can load a plugin built in a path with non-alpha
# characters
-GOPATH=$(pwd) go build -buildmode=plugin -ldflags='-pluginpath=issue.19534' -o plugin.so src/issue19534/plugin.go
-GOPATH=$(pwd) go build -o issue19534 src/issue19534/main.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -ldflags='-pluginpath=issue.19534' -o plugin.so src/issue19534/plugin.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue19534 src/issue19534/main.go
./issue19534
+
+# Test for issue 18584
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o plugin.so src/issue18584/plugin.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue18584 src/issue18584/main.go
+./issue18584
+
+# Test for issue 19418
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin "-ldflags=-X main.Val=linkstr" -o plugin.so src/issue19418/plugin.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue19418 src/issue19418/main.go
+./issue19418
+
+# Test for issue 19529
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o plugin.so src/issue19529/plugin.go
+
+# Test for issue 22175
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue22175_plugin1.so src/issue22175/plugin1.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue22175_plugin2.so src/issue22175/plugin2.go
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22175 src/issue22175/main.go
+./issue22175
+
+# Test for issue 22295
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue.22295.so issue22295.pkg
+GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22295 src/issue22295.pkg/main.go
+./issue22295
diff --git a/misc/cgo/testplugin/unnamed1.go b/misc/cgo/testplugin/unnamed1/main.go
similarity index 100%
rename from misc/cgo/testplugin/unnamed1.go
rename to misc/cgo/testplugin/unnamed1/main.go
diff --git a/misc/cgo/testplugin/unnamed2.go b/misc/cgo/testplugin/unnamed2/main.go
similarity index 100%
rename from misc/cgo/testplugin/unnamed2.go
rename to misc/cgo/testplugin/unnamed2/main.go
diff --git a/misc/cgo/testsanitizers/cc_test.go b/misc/cgo/testsanitizers/cc_test.go
new file mode 100644
index 00000000000..cacb0d93df7
--- /dev/null
+++ b/misc/cgo/testsanitizers/cc_test.go
@@ -0,0 +1,441 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// sanitizers_test checks the use of Go with sanitizers like msan, asan, etc.
+// See https://github.com/google/sanitizers.
+package sanitizers_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "testing"
+ "unicode"
+)
+
+var overcommit struct {
+ sync.Once
+ value int
+ err error
+}
+
+// requireOvercommit skips t if the kernel does not allow overcommit.
+func requireOvercommit(t *testing.T) {
+ t.Helper()
+
+ overcommit.Once.Do(func() {
+ var out []byte
+ out, overcommit.err = ioutil.ReadFile("/proc/sys/vm/overcommit_memory")
+ if overcommit.err != nil {
+ return
+ }
+ overcommit.value, overcommit.err = strconv.Atoi(string(bytes.TrimSpace(out)))
+ })
+
+ if overcommit.err != nil {
+ t.Skipf("couldn't determine vm.overcommit_memory (%v); assuming no overcommit", overcommit.err)
+ }
+ if overcommit.value == 2 {
+ t.Skip("vm.overcommit_memory=2")
+ }
+}
+
+var env struct {
+ sync.Once
+ m map[string]string
+ err error
+}
+
+// goEnv returns the output of $(go env) as a map.
+func goEnv(key string) (string, error) {
+ env.Once.Do(func() {
+ var out []byte
+ out, env.err = exec.Command("go", "env", "-json").Output()
+ if env.err != nil {
+ return
+ }
+
+ env.m = make(map[string]string)
+ env.err = json.Unmarshal(out, &env.m)
+ })
+ if env.err != nil {
+ return "", env.err
+ }
+
+ v, ok := env.m[key]
+ if !ok {
+ return "", fmt.Errorf("`go env`: no entry for %v", key)
+ }
+ return v, nil
+}
+
+// replaceEnv sets the key environment variable to value in cmd.
+func replaceEnv(cmd *exec.Cmd, key, value string) {
+ if cmd.Env == nil {
+ cmd.Env = os.Environ()
+ }
+ cmd.Env = append(cmd.Env, key+"="+value)
+}
+
+// mustRun executes t and fails cmd with a well-formatted message if it fails.
+func mustRun(t *testing.T, cmd *exec.Cmd) {
+ t.Helper()
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("%#q exited with %v\n%s", strings.Join(cmd.Args, " "), err, out)
+ }
+}
+
+// cc returns a cmd that executes `$(go env CC) $(go env GOGCCFLAGS) $args`.
+func cc(args ...string) (*exec.Cmd, error) {
+ CC, err := goEnv("CC")
+ if err != nil {
+ return nil, err
+ }
+
+ GOGCCFLAGS, err := goEnv("GOGCCFLAGS")
+ if err != nil {
+ return nil, err
+ }
+
+ // Split GOGCCFLAGS, respecting quoting.
+ //
+ // TODO(bcmills): This code also appears in
+ // misc/cgo/testcarchive/carchive_test.go, and perhaps ought to go in
+ // src/cmd/dist/test.go as well. Figure out where to put it so that it can be
+ // shared.
+ var flags []string
+ quote := '\000'
+ start := 0
+ lastSpace := true
+ backslash := false
+ for i, c := range GOGCCFLAGS {
+ if quote == '\000' && unicode.IsSpace(c) {
+ if !lastSpace {
+ flags = append(flags, GOGCCFLAGS[start:i])
+ lastSpace = true
+ }
+ } else {
+ if lastSpace {
+ start = i
+ lastSpace = false
+ }
+ if quote == '\000' && !backslash && (c == '"' || c == '\'') {
+ quote = c
+ backslash = false
+ } else if !backslash && quote == c {
+ quote = '\000'
+ } else if (quote == '\000' || quote == '"') && !backslash && c == '\\' {
+ backslash = true
+ } else {
+ backslash = false
+ }
+ }
+ }
+ if !lastSpace {
+ flags = append(flags, GOGCCFLAGS[start:])
+ }
+
+ cmd := exec.Command(CC, flags...)
+ cmd.Args = append(cmd.Args, args...)
+ return cmd, nil
+}
+
+type version struct {
+ name string
+ major, minor int
+}
+
+var compiler struct {
+ sync.Once
+ version
+ err error
+}
+
+// compilerVersion detects the version of $(go env CC).
+//
+// It returns a non-nil error if the compiler matches a known version schema but
+// the version could not be parsed, or if $(go env CC) could not be determined.
+func compilerVersion() (version, error) {
+ compiler.Once.Do(func() {
+ compiler.err = func() error {
+ compiler.name = "unknown"
+
+ cmd, err := cc("--version")
+ if err != nil {
+ return err
+ }
+ out, err := cmd.Output()
+ if err != nil {
+ // Compiler does not support "--version" flag: not Clang or GCC.
+ return nil
+ }
+
+ var match [][]byte
+ if bytes.HasPrefix(out, []byte("gcc")) {
+ compiler.name = "gcc"
+
+ cmd, err := cc("-dumpversion")
+ if err != nil {
+ return err
+ }
+ out, err := cmd.Output()
+ if err != nil {
+ // gcc, but does not support gcc's "-dumpversion" flag?!
+ return err
+ }
+ gccRE := regexp.MustCompile(`(\d+)\.(\d+)`)
+ match = gccRE.FindSubmatch(out)
+ } else {
+ clangRE := regexp.MustCompile(`clang version (\d+)\.(\d+)`)
+ if match = clangRE.FindSubmatch(out); len(match) > 0 {
+ compiler.name = "clang"
+ }
+ }
+
+ if len(match) < 3 {
+ return nil // "unknown"
+ }
+ if compiler.major, err = strconv.Atoi(string(match[1])); err != nil {
+ return err
+ }
+ if compiler.minor, err = strconv.Atoi(string(match[2])); err != nil {
+ return err
+ }
+ return nil
+ }()
+ })
+ return compiler.version, compiler.err
+}
+
+type compilerCheck struct {
+ once sync.Once
+ err error
+ skip bool // If true, skip with err instead of failing with it.
+}
+
+type config struct {
+ sanitizer string
+
+ cFlags, ldFlags, goFlags []string
+
+ sanitizerCheck, runtimeCheck compilerCheck
+}
+
+var configs struct {
+ sync.Mutex
+ m map[string]*config
+}
+
+// configure returns the configuration for the given sanitizer.
+func configure(sanitizer string) *config {
+ configs.Lock()
+ defer configs.Unlock()
+ if c, ok := configs.m[sanitizer]; ok {
+ return c
+ }
+
+ c := &config{
+ sanitizer: sanitizer,
+ cFlags: []string{"-fsanitize=" + sanitizer},
+ ldFlags: []string{"-fsanitize=" + sanitizer},
+ }
+
+ if testing.Verbose() {
+ c.goFlags = append(c.goFlags, "-x")
+ }
+
+ switch sanitizer {
+ case "memory":
+ c.goFlags = append(c.goFlags, "-msan")
+
+ case "thread":
+ c.goFlags = append(c.goFlags, "--installsuffix=tsan")
+ compiler, _ := compilerVersion()
+ if compiler.name == "gcc" {
+ c.cFlags = append(c.cFlags, "-fPIC")
+ c.ldFlags = append(c.ldFlags, "-fPIC", "-static-libtsan")
+ }
+
+ default:
+ panic(fmt.Sprintf("unrecognized sanitizer: %q", sanitizer))
+ }
+
+ if configs.m == nil {
+ configs.m = make(map[string]*config)
+ }
+ configs.m[sanitizer] = c
+ return c
+}
+
+// goCmd returns a Cmd that executes "go $subcommand $args" with appropriate
+// additional flags and environment.
+func (c *config) goCmd(subcommand string, args ...string) *exec.Cmd {
+ cmd := exec.Command("go", subcommand)
+ cmd.Args = append(cmd.Args, c.goFlags...)
+ cmd.Args = append(cmd.Args, args...)
+ replaceEnv(cmd, "CGO_CFLAGS", strings.Join(c.cFlags, " "))
+ replaceEnv(cmd, "CGO_LDFLAGS", strings.Join(c.ldFlags, " "))
+ return cmd
+}
+
+// skipIfCSanitizerBroken skips t if the C compiler does not produce working
+// binaries as configured.
+func (c *config) skipIfCSanitizerBroken(t *testing.T) {
+ check := &c.sanitizerCheck
+ check.once.Do(func() {
+ check.skip, check.err = c.checkCSanitizer()
+ })
+ if check.err != nil {
+ t.Helper()
+ if check.skip {
+ t.Skip(check.err)
+ }
+ t.Fatal(check.err)
+ }
+}
+
+var cMain = []byte(`
+int main() {
+ return 0;
+}
+`)
+
+func (c *config) checkCSanitizer() (skip bool, err error) {
+ dir, err := ioutil.TempDir("", c.sanitizer)
+ if err != nil {
+ return false, fmt.Errorf("failed to create temp directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ src := filepath.Join(dir, "return0.c")
+ if err := ioutil.WriteFile(src, cMain, 0600); err != nil {
+ return false, fmt.Errorf("failed to write C source file: %v", err)
+ }
+
+ dst := filepath.Join(dir, "return0")
+ cmd, err := cc(c.cFlags...)
+ if err != nil {
+ return false, err
+ }
+ cmd.Args = append(cmd.Args, c.ldFlags...)
+ cmd.Args = append(cmd.Args, "-o", dst, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ if bytes.Contains(out, []byte("-fsanitize")) &&
+ (bytes.Contains(out, []byte("unrecognized")) ||
+ bytes.Contains(out, []byte("unsupported"))) {
+ return true, errors.New(string(out))
+ }
+ return true, fmt.Errorf("%#q failed: %v\n%s", strings.Join(cmd.Args, " "), err, out)
+ }
+
+ if out, err := exec.Command(dst).CombinedOutput(); err != nil {
+ if os.IsNotExist(err) {
+ return true, fmt.Errorf("%#q failed to produce executable: %v", strings.Join(cmd.Args, " "), err)
+ }
+ snippet := bytes.SplitN(out, []byte{'\n'}, 2)[0]
+ return true, fmt.Errorf("%#q generated broken executable: %v\n%s", strings.Join(cmd.Args, " "), err, snippet)
+ }
+
+ return false, nil
+}
+
+// skipIfRuntimeIncompatible skips t if the Go runtime is suspected not to work
+// with cgo as configured.
+func (c *config) skipIfRuntimeIncompatible(t *testing.T) {
+ check := &c.runtimeCheck
+ check.once.Do(func() {
+ check.skip, check.err = c.checkRuntime()
+ })
+ if check.err != nil {
+ t.Helper()
+ if check.skip {
+ t.Skip(check.err)
+ }
+ t.Fatal(check.err)
+ }
+}
+
+func (c *config) checkRuntime() (skip bool, err error) {
+ if c.sanitizer != "thread" {
+ return false, nil
+ }
+
+ // libcgo.h sets CGO_TSAN if it detects TSAN support in the C compiler.
+ // Dump the preprocessor defines to check that that works.
+ // (Sometimes it doesn't: see https://golang.org/issue/15983.)
+ cmd, err := cc(c.cFlags...)
+ if err != nil {
+ return false, err
+ }
+ cmd.Args = append(cmd.Args, "-dM", "-E", "../../../src/runtime/cgo/libcgo.h")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return false, fmt.Errorf("%#q exited with %v\n%s", strings.Join(cmd.Args, " "), err, out)
+ }
+ if !bytes.Contains(out, []byte("#define CGO_TSAN")) {
+		return true, fmt.Errorf("%#q did not define CGO_TSAN", strings.Join(cmd.Args, " "))
+ }
+ return false, nil
+}
+
+// srcPath returns the path to the given file relative to this test's source tree.
+func srcPath(path string) string {
+ return filepath.Join("src", path)
+}
+
+// A tempDir manages a temporary directory within a test.
+type tempDir struct {
+ base string
+}
+
+func (d *tempDir) RemoveAll(t *testing.T) {
+ t.Helper()
+ if d.base == "" {
+ return
+ }
+ if err := os.RemoveAll(d.base); err != nil {
+		t.Fatalf("Failed to remove temp dir: %v", err)
+ }
+}
+
+func (d *tempDir) Join(name string) string {
+ return filepath.Join(d.base, name)
+}
+
+func newTempDir(t *testing.T) *tempDir {
+ t.Helper()
+ dir, err := ioutil.TempDir("", filepath.Dir(t.Name()))
+ if err != nil {
+ t.Fatalf("Failed to create temp dir: %v", err)
+ }
+ return &tempDir{base: dir}
+}
+
+// hangProneCmd returns an exec.Cmd for a command that is likely to hang.
+//
+// If one of these tests hangs, the caller is likely to kill the test process
+// using SIGINT, which will be sent to all of the processes in the test's group.
+// Unfortunately, TSAN in particular is prone to dropping signals, so the SIGINT
+// may terminate the test binary but leave the subprocess running. hangProneCmd
+// configures subprocess to receive SIGKILL instead to ensure that it won't
+// leak.
+func hangProneCmd(name string, arg ...string) *exec.Cmd {
+ cmd := exec.Command(name, arg...)
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Pdeathsig: syscall.SIGKILL,
+ }
+ return cmd
+}
diff --git a/misc/cgo/testsanitizers/cshared_test.go b/misc/cgo/testsanitizers/cshared_test.go
new file mode 100644
index 00000000000..56063ea6201
--- /dev/null
+++ b/misc/cgo/testsanitizers/cshared_test.go
@@ -0,0 +1,74 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sanitizers_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+func TestShared(t *testing.T) {
+ t.Parallel()
+ requireOvercommit(t)
+
+ GOOS, err := goEnv("GOOS")
+ if err != nil {
+ t.Fatal(err)
+ }
+ libExt := "so"
+ if GOOS == "darwin" {
+ libExt = "dylib"
+ }
+
+ cases := []struct {
+ src string
+ sanitizer string
+ }{
+ {
+ src: "msan_shared.go",
+ sanitizer: "memory",
+ },
+ {
+ src: "tsan_shared.go",
+ sanitizer: "thread",
+ },
+ }
+
+ for _, tc := range cases {
+ tc := tc
+ name := strings.TrimSuffix(tc.src, ".go")
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+ config := configure(tc.sanitizer)
+ config.skipIfCSanitizerBroken(t)
+
+ dir := newTempDir(t)
+ defer dir.RemoveAll(t)
+
+ lib := dir.Join(fmt.Sprintf("lib%s.%s", name, libExt))
+ mustRun(t, config.goCmd("build", "-buildmode=c-shared", "-o", lib, srcPath(tc.src)))
+
+ cSrc := dir.Join("main.c")
+ if err := ioutil.WriteFile(cSrc, cMain, 0600); err != nil {
+ t.Fatalf("failed to write C source file: %v", err)
+ }
+
+ dstBin := dir.Join(name)
+ cmd, err := cc(config.cFlags...)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cmd.Args = append(cmd.Args, config.ldFlags...)
+ cmd.Args = append(cmd.Args, "-o", dstBin, cSrc, lib)
+ mustRun(t, cmd)
+
+ cmd = hangProneCmd(dstBin)
+ replaceEnv(cmd, "LD_LIBRARY_PATH", ".")
+ mustRun(t, cmd)
+ })
+ }
+}
diff --git a/misc/cgo/testsanitizers/msan_test.go b/misc/cgo/testsanitizers/msan_test.go
new file mode 100644
index 00000000000..af5afa9ee48
--- /dev/null
+++ b/misc/cgo/testsanitizers/msan_test.go
@@ -0,0 +1,55 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sanitizers_test
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestMSAN(t *testing.T) {
+ t.Parallel()
+ requireOvercommit(t)
+ config := configure("memory")
+ config.skipIfCSanitizerBroken(t)
+
+ mustRun(t, config.goCmd("build", "std"))
+
+ cases := []struct {
+ src string
+ wantErr bool
+ }{
+ {src: "msan.go"},
+ {src: "msan2.go"},
+ {src: "msan2_cmsan.go"},
+ {src: "msan3.go"},
+ {src: "msan4.go"},
+ {src: "msan5.go"},
+ {src: "msan_fail.go", wantErr: true},
+ }
+ for _, tc := range cases {
+ tc := tc
+ name := strings.TrimSuffix(tc.src, ".go")
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ dir := newTempDir(t)
+ defer dir.RemoveAll(t)
+
+ outPath := dir.Join(name)
+ mustRun(t, config.goCmd("build", "-o", outPath, srcPath(tc.src)))
+
+ cmd := hangProneCmd(outPath)
+ if tc.wantErr {
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return
+ }
+ t.Fatalf("%#q exited without error; want MSAN failure\n%s", strings.Join(cmd.Args, " "), out)
+ }
+ mustRun(t, cmd)
+ })
+ }
+}
diff --git a/misc/cgo/testsanitizers/msan.go b/misc/cgo/testsanitizers/src/msan.go
similarity index 100%
rename from misc/cgo/testsanitizers/msan.go
rename to misc/cgo/testsanitizers/src/msan.go
diff --git a/misc/cgo/testsanitizers/msan2.go b/misc/cgo/testsanitizers/src/msan2.go
similarity index 100%
rename from misc/cgo/testsanitizers/msan2.go
rename to misc/cgo/testsanitizers/src/msan2.go
diff --git a/misc/cgo/testsanitizers/src/msan2_cmsan.go b/misc/cgo/testsanitizers/src/msan2_cmsan.go
new file mode 100644
index 00000000000..8fdaea90c97
--- /dev/null
+++ b/misc/cgo/testsanitizers/src/msan2_cmsan.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+#cgo LDFLAGS: -fsanitize=memory
+#cgo CPPFLAGS: -fsanitize=memory
+
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+void f(int32_t *p, int n) {
+ int32_t * volatile q = (int32_t *)malloc(sizeof(int32_t) * n);
+ memcpy(p, q, n * sizeof(*p));
+ free(q);
+}
+
+void g(int32_t *p, int n) {
+ if (p[4] != 1) {
+ abort();
+ }
+}
+*/
+import "C"
+
+import (
+ "unsafe"
+)
+
+func main() {
+ a := make([]int32, 10)
+ C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a)))
+ a[4] = 1
+ C.g((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a)))
+}
diff --git a/misc/cgo/testsanitizers/msan3.go b/misc/cgo/testsanitizers/src/msan3.go
similarity index 100%
rename from misc/cgo/testsanitizers/msan3.go
rename to misc/cgo/testsanitizers/src/msan3.go
diff --git a/misc/cgo/testsanitizers/msan4.go b/misc/cgo/testsanitizers/src/msan4.go
similarity index 100%
rename from misc/cgo/testsanitizers/msan4.go
rename to misc/cgo/testsanitizers/src/msan4.go
diff --git a/misc/cgo/testsanitizers/msan5.go b/misc/cgo/testsanitizers/src/msan5.go
similarity index 100%
rename from misc/cgo/testsanitizers/msan5.go
rename to misc/cgo/testsanitizers/src/msan5.go
diff --git a/misc/cgo/testsanitizers/msan_fail.go b/misc/cgo/testsanitizers/src/msan_fail.go
similarity index 100%
rename from misc/cgo/testsanitizers/msan_fail.go
rename to misc/cgo/testsanitizers/src/msan_fail.go
diff --git a/misc/cgo/testsanitizers/msan_shared.go b/misc/cgo/testsanitizers/src/msan_shared.go
similarity index 100%
rename from misc/cgo/testsanitizers/msan_shared.go
rename to misc/cgo/testsanitizers/src/msan_shared.go
diff --git a/misc/cgo/testsanitizers/tsan.go b/misc/cgo/testsanitizers/src/tsan.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan.go
rename to misc/cgo/testsanitizers/src/tsan.go
diff --git a/misc/cgo/testsanitizers/tsan10.go b/misc/cgo/testsanitizers/src/tsan10.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan10.go
rename to misc/cgo/testsanitizers/src/tsan10.go
diff --git a/misc/cgo/testsanitizers/tsan11.go b/misc/cgo/testsanitizers/src/tsan11.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan11.go
rename to misc/cgo/testsanitizers/src/tsan11.go
diff --git a/misc/cgo/testsanitizers/tsan12.go b/misc/cgo/testsanitizers/src/tsan12.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan12.go
rename to misc/cgo/testsanitizers/src/tsan12.go
diff --git a/misc/cgo/testsanitizers/tsan2.go b/misc/cgo/testsanitizers/src/tsan2.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan2.go
rename to misc/cgo/testsanitizers/src/tsan2.go
diff --git a/misc/cgo/testsanitizers/tsan3.go b/misc/cgo/testsanitizers/src/tsan3.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan3.go
rename to misc/cgo/testsanitizers/src/tsan3.go
diff --git a/misc/cgo/testsanitizers/tsan4.go b/misc/cgo/testsanitizers/src/tsan4.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan4.go
rename to misc/cgo/testsanitizers/src/tsan4.go
diff --git a/misc/cgo/testsanitizers/tsan5.go b/misc/cgo/testsanitizers/src/tsan5.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan5.go
rename to misc/cgo/testsanitizers/src/tsan5.go
diff --git a/misc/cgo/testsanitizers/tsan6.go b/misc/cgo/testsanitizers/src/tsan6.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan6.go
rename to misc/cgo/testsanitizers/src/tsan6.go
diff --git a/misc/cgo/testsanitizers/tsan7.go b/misc/cgo/testsanitizers/src/tsan7.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan7.go
rename to misc/cgo/testsanitizers/src/tsan7.go
diff --git a/misc/cgo/testsanitizers/tsan8.go b/misc/cgo/testsanitizers/src/tsan8.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan8.go
rename to misc/cgo/testsanitizers/src/tsan8.go
diff --git a/misc/cgo/testsanitizers/tsan9.go b/misc/cgo/testsanitizers/src/tsan9.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan9.go
rename to misc/cgo/testsanitizers/src/tsan9.go
diff --git a/misc/cgo/testsanitizers/tsan_shared.go b/misc/cgo/testsanitizers/src/tsan_shared.go
similarity index 100%
rename from misc/cgo/testsanitizers/tsan_shared.go
rename to misc/cgo/testsanitizers/src/tsan_shared.go
diff --git a/misc/cgo/testsanitizers/test.bash b/misc/cgo/testsanitizers/test.bash
deleted file mode 100755
index 9f80af6c507..00000000000
--- a/misc/cgo/testsanitizers/test.bash
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2015 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# This directory is intended to test the use of Go with sanitizers
-# like msan, asan, etc. See https://github.com/google/sanitizers .
-
-set -e
-
-# The sanitizers were originally developed with clang, so prefer it.
-CC=cc
-if test -x "$(type -p clang)"; then
- CC=clang
-fi
-export CC
-
-if [ "$(sysctl -n vm.overcommit_memory)" = 2 ]; then
- echo "skipping msan/tsan tests: vm.overcommit_memory=2" >&2
- exit 0
-fi
-
-msan=yes
-
-TMPDIR=${TMPDIR:-/tmp}
-echo 'int main() { return 0; }' > ${TMPDIR}/testsanitizers$$.c
-if $CC -fsanitize=memory -o ${TMPDIR}/testsanitizers$$ ${TMPDIR}/testsanitizers$$.c 2>&1 | grep "unrecognized" >& /dev/null; then
- echo "skipping msan tests: $CC -fsanitize=memory not supported"
- msan=no
-elif ! test -x ${TMPDIR}/testsanitizers$$; then
- echo "skipping msan tests: $CC -fsanitize-memory did not generate an executable"
- msan=no
-elif ! ${TMPDIR}/testsanitizers$$ >/dev/null 2>&1; then
- echo "skipping msan tests: $CC -fsanitize-memory generates broken executable"
- msan=no
-fi
-rm -f ${TMPDIR}/testsanitizers$$.*
-
-tsan=yes
-
-# The memory and thread sanitizers in versions of clang before 3.6
-# don't work with Go.
-if test "$msan" = "yes" && $CC --version | grep clang >& /dev/null; then
- ver=$($CC --version | sed -e 's/.* version \([0-9.-]*\).*/\1/')
- major=$(echo $ver | sed -e 's/\([0-9]*\).*/\1/')
- minor=$(echo $ver | sed -e 's/[0-9]*\.\([0-9]*\).*/\1/')
- if test "$major" -lt 3 || test "$major" -eq 3 -a "$minor" -lt 6; then
- echo "skipping msan/tsan tests: clang version $major.$minor (older than 3.6)"
- msan=no
- tsan=no
- fi
-
- # Clang before 3.8 does not work with Linux at or after 4.1.
- # golang.org/issue/12898.
- if test "$msan" = "yes" -a "$major" -lt 3 || test "$major" -eq 3 -a "$minor" -lt 8; then
- if test "$(uname)" = Linux; then
- linuxver=$(uname -r)
- linuxmajor=$(echo $linuxver | sed -e 's/\([0-9]*\).*/\1/')
- linuxminor=$(echo $linuxver | sed -e 's/[0-9]*\.\([0-9]*\).*/\1/')
- if test "$linuxmajor" -gt 4 || test "$linuxmajor" -eq 4 -a "$linuxminor" -ge 1; then
- echo "skipping msan/tsan tests: clang version $major.$minor (older than 3.8) incompatible with linux version $linuxmajor.$linuxminor (4.1 or newer)"
- msan=no
- tsan=no
- fi
- fi
- fi
-fi
-
-status=0
-
-testmsanshared() {
- goos=$(go env GOOS)
- suffix="-installsuffix testsanitizers"
- libext="so"
- if [ "$goos" = "darwin" ]; then
- libext="dylib"
- fi
- go build -msan -buildmode=c-shared $suffix -o ${TMPDIR}/libmsanshared.$libext msan_shared.go
-
- echo 'int main() { return 0; }' > ${TMPDIR}/testmsanshared.c
- $CC $(go env GOGCCFLAGS) -fsanitize=memory -o ${TMPDIR}/testmsanshared ${TMPDIR}/testmsanshared.c ${TMPDIR}/libmsanshared.$libext
-
- if ! LD_LIBRARY_PATH=. ${TMPDIR}/testmsanshared; then
- echo "FAIL: msan_shared"
- status=1
- fi
- rm -f ${TMPDIR}/{testmsanshared,testmsanshared.c,libmsanshared.$libext}
-}
-
-if test "$msan" = "yes"; then
- if ! go build -msan std; then
- echo "FAIL: build -msan std"
- status=1
- fi
-
- if ! go run -msan msan.go; then
- echo "FAIL: msan"
- status=1
- fi
-
- if ! CGO_LDFLAGS="-fsanitize=memory" CGO_CPPFLAGS="-fsanitize=memory" go run -msan -a msan2.go; then
- echo "FAIL: msan2 with -fsanitize=memory"
- status=1
- fi
-
- if ! go run -msan -a msan2.go; then
- echo "FAIL: msan2"
- status=1
- fi
-
- if ! go run -msan msan3.go; then
- echo "FAIL: msan3"
- status=1
- fi
-
- if ! go run -msan msan4.go; then
- echo "FAIL: msan4"
- status=1
- fi
-
- if ! go run -msan msan5.go; then
- echo "FAIL: msan5"
- status=1
- fi
-
- if go run -msan msan_fail.go 2>/dev/null; then
- echo "FAIL: msan_fail"
- status=1
- fi
-
- testmsanshared
-fi
-
-testtsanshared() {
- goos=$(go env GOOS)
- suffix="-installsuffix tsan"
- libext="so"
- if [ "$goos" = "darwin" ]; then
- libext="dylib"
- fi
- go build -buildmode=c-shared $suffix -o ${TMPDIR}/libtsanshared.$libext tsan_shared.go
-
- echo 'int main() { return 0; }' > ${TMPDIR}/testtsanshared.c
- $CC $(go env GOGCCFLAGS) -fsanitize=thread -o ${TMPDIR}/testtsanshared ${TMPDIR}/testtsanshared.c ${TMPDIR}/libtsanshared.$libext
-
- if ! LD_LIBRARY_PATH=. ${TMPDIR}/testtsanshared; then
- echo "FAIL: tsan_shared"
- status=1
- fi
- rm -f ${TMPDIR}/{testtsanshared,testtsanshared.c,libtsanshared.$libext}
-}
-
-if test "$tsan" = "yes"; then
- echo 'int main() { return 0; }' > ${TMPDIR}/testsanitizers$$.c
- ok=yes
- if ! $CC -fsanitize=thread ${TMPDIR}/testsanitizers$$.c -o ${TMPDIR}/testsanitizers$$ &> ${TMPDIR}/testsanitizers$$.err; then
- ok=no
- fi
- if grep "unrecognized" ${TMPDIR}/testsanitizers$$.err >& /dev/null; then
- echo "skipping tsan tests: -fsanitize=thread not supported"
- tsan=no
- elif test "$ok" != "yes"; then
- cat ${TMPDIR}/testsanitizers$$.err
- echo "skipping tsan tests: -fsanitizer=thread build failed"
- tsan=no
- elif ! ${TMPDIR}/testsanitizers$$ 2>&1; then
- echo "skipping tsan tests: running tsan program failed"
- tsan=no
- fi
- rm -f ${TMPDIR}/testsanitizers$$*
-fi
-
-# Run a TSAN test.
-# $1 test name
-# $2 environment variables
-# $3 go run args
-testtsan() {
- err=${TMPDIR}/tsanerr$$.out
- if ! env $2 go run $3 $1 2>$err; then
- cat $err
- echo "FAIL: $1"
- status=1
- elif grep -i warning $err >/dev/null 2>&1; then
- cat $err
- echo "FAIL: $1"
- status=1
- fi
- rm -f $err
-}
-
-if test "$tsan" = "yes"; then
- testtsan tsan.go
- testtsan tsan2.go
- testtsan tsan3.go
- testtsan tsan4.go
- testtsan tsan8.go
- testtsan tsan9.go
-
- # These tests are only reliable using clang or GCC version 7 or later.
- # Otherwise runtime/cgo/libcgo.h can't tell whether TSAN is in use.
- ok=false
- clang=false
- if ${CC} --version | grep clang >/dev/null 2>&1; then
- ok=true
- clang=true
- else
- ver=$($CC -dumpversion)
- major=$(echo $ver | sed -e 's/\([0-9]*\).*/\1/')
- if test "$major" -lt 7; then
- echo "skipping remaining TSAN tests: GCC version $major (older than 7)"
- else
- ok=true
- fi
- fi
-
- if test "$ok" = "true"; then
- # These tests require rebuilding os/user with -fsanitize=thread.
- testtsan tsan5.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
- testtsan tsan6.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
- testtsan tsan7.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
-
- # The remaining tests reportedly hang when built with GCC; issue #21196.
- if test "$clang" = "true"; then
- testtsan tsan10.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
- testtsan tsan11.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
- testtsan tsan12.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
- fi
-
- testtsanshared
- fi
-fi
-
-exit $status
diff --git a/misc/cgo/testsanitizers/tsan_test.go b/misc/cgo/testsanitizers/tsan_test.go
new file mode 100644
index 00000000000..ec4e0033fb4
--- /dev/null
+++ b/misc/cgo/testsanitizers/tsan_test.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sanitizers_test
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestTSAN(t *testing.T) {
+ t.Parallel()
+ requireOvercommit(t)
+ config := configure("thread")
+ config.skipIfCSanitizerBroken(t)
+
+ mustRun(t, config.goCmd("build", "std"))
+
+ cases := []struct {
+ src string
+ needsRuntime bool
+ }{
+ {src: "tsan.go"},
+ {src: "tsan2.go"},
+ {src: "tsan3.go"},
+ {src: "tsan4.go"},
+ {src: "tsan5.go", needsRuntime: true},
+ {src: "tsan6.go", needsRuntime: true},
+ {src: "tsan7.go", needsRuntime: true},
+ {src: "tsan8.go"},
+ {src: "tsan9.go"},
+ {src: "tsan10.go", needsRuntime: true},
+ {src: "tsan11.go", needsRuntime: true},
+ {src: "tsan12.go", needsRuntime: true},
+ }
+ for _, tc := range cases {
+ tc := tc
+ name := strings.TrimSuffix(tc.src, ".go")
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ dir := newTempDir(t)
+ defer dir.RemoveAll(t)
+
+ outPath := dir.Join(name)
+ mustRun(t, config.goCmd("build", "-o", outPath, srcPath(tc.src)))
+
+ cmd := hangProneCmd(outPath)
+ if tc.needsRuntime {
+ config.skipIfRuntimeIncompatible(t)
+ }
+ mustRun(t, cmd)
+ })
+ }
+}
diff --git a/misc/cgo/testshared/shared_test.go b/misc/cgo/testshared/shared_test.go
index 9e682a2fb59..f1e8f0605b6 100644
--- a/misc/cgo/testshared/shared_test.go
+++ b/misc/cgo/testshared/shared_test.go
@@ -10,6 +10,7 @@ import (
"debug/elf"
"encoding/binary"
"errors"
+ "flag"
"fmt"
"go/build"
"io"
@@ -46,7 +47,7 @@ func run(t *testing.T, msg string, args ...string) {
func goCmd(t *testing.T, args ...string) {
newargs := []string{args[0], "-installsuffix=" + suffix}
if testing.Verbose() {
- newargs = append(newargs, "-v")
+ newargs = append(newargs, "-x")
}
newargs = append(newargs, args[1:]...)
c := exec.Command("go", newargs...)
@@ -57,6 +58,7 @@ func goCmd(t *testing.T, args ...string) {
c.Stdout = os.Stdout
c.Stderr = os.Stderr
err = c.Run()
+ output = []byte("(output above)")
} else {
output, err = c.CombinedOutput()
}
@@ -161,6 +163,8 @@ func testMain(m *testing.M) (int, error) {
}
func TestMain(m *testing.M) {
+ flag.Parse()
+
// Some of the tests install binaries into a custom GOPATH.
// That won't work if GOBIN is set.
os.Unsetenv("GOBIN")
@@ -461,13 +465,13 @@ func TestGopathShlib(t *testing.T) {
// that is not mapped into memory.
func testPkgListNote(t *testing.T, f *elf.File, note *note) {
if note.section.Flags != 0 {
- t.Errorf("package list section has flags %v", note.section.Flags)
+ t.Errorf("package list section has flags %v, want 0", note.section.Flags)
}
if isOffsetLoaded(f, note.section.Offset) {
t.Errorf("package list section contained in PT_LOAD segment")
}
if note.desc != "depBase\n" {
- t.Errorf("incorrect package list %q", note.desc)
+ t.Errorf("incorrect package list %q, want %q", note.desc, "depBase\n")
}
}
@@ -476,7 +480,7 @@ func testPkgListNote(t *testing.T, f *elf.File, note *note) {
// bytes into it.
func testABIHashNote(t *testing.T, f *elf.File, note *note) {
if note.section.Flags != elf.SHF_ALLOC {
- t.Errorf("abi hash section has flags %v", note.section.Flags)
+ t.Errorf("abi hash section has flags %v, want SHF_ALLOC", note.section.Flags)
}
if !isOffsetLoaded(f, note.section.Offset) {
t.Errorf("abihash section not contained in PT_LOAD segment")
@@ -497,13 +501,13 @@ func testABIHashNote(t *testing.T, f *elf.File, note *note) {
return
}
if elf.ST_BIND(hashbytes.Info) != elf.STB_LOCAL {
- t.Errorf("%s has incorrect binding %v", hashbytes.Name, elf.ST_BIND(hashbytes.Info))
+ t.Errorf("%s has incorrect binding %v, want STB_LOCAL", hashbytes.Name, elf.ST_BIND(hashbytes.Info))
}
if f.Sections[hashbytes.Section] != note.section {
- t.Errorf("%s has incorrect section %v", hashbytes.Name, f.Sections[hashbytes.Section].Name)
+ t.Errorf("%s has incorrect section %v, want %s", hashbytes.Name, f.Sections[hashbytes.Section].Name, note.section.Name)
}
if hashbytes.Value-note.section.Addr != 16 {
- t.Errorf("%s has incorrect offset into section %d", hashbytes.Name, hashbytes.Value-note.section.Addr)
+ t.Errorf("%s has incorrect offset into section %d, want 16", hashbytes.Name, hashbytes.Value-note.section.Addr)
}
}
@@ -511,14 +515,14 @@ func testABIHashNote(t *testing.T, f *elf.File, note *note) {
// was linked against in an unmapped section.
func testDepsNote(t *testing.T, f *elf.File, note *note) {
if note.section.Flags != 0 {
- t.Errorf("package list section has flags %v", note.section.Flags)
+ t.Errorf("package list section has flags %v, want 0", note.section.Flags)
}
if isOffsetLoaded(f, note.section.Offset) {
t.Errorf("package list section contained in PT_LOAD segment")
}
// libdepBase.so just links against the lib containing the runtime.
if note.desc != soname {
- t.Errorf("incorrect dependency list %q", note.desc)
+ t.Errorf("incorrect dependency list %q, want %q", note.desc, soname)
}
}
@@ -556,7 +560,7 @@ func TestNotes(t *testing.T) {
abiHashNoteFound = true
case 3: // ELF_NOTE_GODEPS_TAG
if depsNoteFound {
- t.Error("multiple abi hash notes")
+ t.Error("multiple dependency list notes")
}
testDepsNote(t, f, note)
depsNoteFound = true
@@ -594,6 +598,7 @@ func TestThreeGopathShlibs(t *testing.T) {
// If gccgo is not available or not new enough call t.Skip. Otherwise,
// return a build.Context that is set up for gccgo.
func prepGccgo(t *testing.T) build.Context {
+ t.Skip("golang.org/issue/22472")
gccgoName := os.Getenv("GCCGO")
if gccgoName == "" {
gccgoName = "gccgo"
@@ -643,6 +648,8 @@ func TestGoPathShlibGccgo(t *testing.T) {
// library with gccgo, another GOPATH package that depends on the first and an
// executable that links the second library.
func TestTwoGopathShlibsGccgo(t *testing.T) {
+ t.Skip("golang.org/issue/22224")
+
gccgoContext := prepGccgo(t)
libgoRE := regexp.MustCompile("libgo.so.[0-9]+")
@@ -696,18 +703,55 @@ func resetFileStamps() {
reset(gorootInstallDir)
}
-// touch makes path newer than the "old" time stamp used by resetFileStamps.
-func touch(path string) {
+// touch changes path and returns a function that changes it back.
+// It also sets the time of the file, so that we can see if it is rewritten.
+func touch(t *testing.T, path string) (cleanup func()) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ old := make([]byte, len(data))
+ copy(old, data)
+ if bytes.HasPrefix(data, []byte("!\n")) {
+ // Change last digit of build ID.
+ // (Content ID in the new content-based build IDs.)
+ const marker = `build id "`
+ i := bytes.Index(data, []byte(marker))
+ if i < 0 {
+ t.Fatal("cannot find build id in archive")
+ }
+ j := bytes.IndexByte(data[i+len(marker):], '"')
+ if j < 0 {
+ t.Fatal("cannot find build id in archive")
+ }
+ i += len(marker) + j - 1
+ if data[i] == 'a' {
+ data[i] = 'b'
+ } else {
+ data[i] = 'a'
+ }
+ } else {
+ // assume it's a text file
+ data = append(data, '\n')
+ }
+ if err := ioutil.WriteFile(path, data, 0666); err != nil {
+ t.Fatal(err)
+ }
if err := os.Chtimes(path, nearlyNew, nearlyNew); err != nil {
- log.Fatalf("os.Chtimes failed: %v", err)
+ t.Fatal(err)
+ }
+ return func() {
+ if err := ioutil.WriteFile(path, old, 0666); err != nil {
+ t.Fatal(err)
+ }
}
}
// isNew returns if the path is newer than the time stamp used by touch.
-func isNew(path string) bool {
+func isNew(t *testing.T, path string) bool {
fi, err := os.Stat(path)
if err != nil {
- log.Fatalf("os.Stat failed: %v", err)
+ t.Fatal(err)
}
return fi.ModTime().After(stampTime)
}
@@ -715,14 +759,16 @@ func isNew(path string) bool {
// Fail unless path has been rebuilt (i.e. is newer than the time stamp used by
// isNew)
func AssertRebuilt(t *testing.T, msg, path string) {
- if !isNew(path) {
+ t.Helper()
+ if !isNew(t, path) {
t.Errorf("%s was not rebuilt (%s)", msg, path)
}
}
// Fail if path has been rebuilt (i.e. is newer than the time stamp used by isNew)
func AssertNotRebuilt(t *testing.T, msg, path string) {
- if isNew(path) {
+ t.Helper()
+ if isNew(t, path) {
t.Errorf("%s was rebuilt (%s)", msg, path)
}
}
@@ -732,41 +778,55 @@ func TestRebuilding(t *testing.T) {
goCmd(t, "install", "-linkshared", "exe")
// If the source is newer than both the .a file and the .so, both are rebuilt.
- resetFileStamps()
- touch("src/depBase/dep.go")
- goCmd(t, "install", "-linkshared", "exe")
- AssertRebuilt(t, "new source", filepath.Join(gopathInstallDir, "depBase.a"))
- AssertRebuilt(t, "new source", filepath.Join(gopathInstallDir, "libdepBase.so"))
+ t.Run("newsource", func(t *testing.T) {
+ resetFileStamps()
+ cleanup := touch(t, "src/depBase/dep.go")
+ defer func() {
+ cleanup()
+ goCmd(t, "install", "-linkshared", "exe")
+ }()
+ goCmd(t, "install", "-linkshared", "exe")
+ AssertRebuilt(t, "new source", filepath.Join(gopathInstallDir, "depBase.a"))
+ AssertRebuilt(t, "new source", filepath.Join(gopathInstallDir, "libdepBase.so"))
+ })
// If the .a file is newer than the .so, the .so is rebuilt (but not the .a)
- resetFileStamps()
- touch(filepath.Join(gopathInstallDir, "depBase.a"))
- goCmd(t, "install", "-linkshared", "exe")
- AssertNotRebuilt(t, "new .a file", filepath.Join(gopathInstallDir, "depBase.a"))
- AssertRebuilt(t, "new .a file", filepath.Join(gopathInstallDir, "libdepBase.so"))
+ t.Run("newarchive", func(t *testing.T) {
+ resetFileStamps()
+ goCmd(t, "list", "-linkshared", "-f={{.ImportPath}} {{.Stale}} {{.StaleReason}} {{.Target}}", "depBase")
+ AssertNotRebuilt(t, "new .a file before build", filepath.Join(gopathInstallDir, "depBase.a"))
+ cleanup := touch(t, filepath.Join(gopathInstallDir, "depBase.a"))
+ defer func() {
+ cleanup()
+ goCmd(t, "install", "-v", "-linkshared", "exe")
+ }()
+ goCmd(t, "install", "-v", "-linkshared", "exe")
+ AssertNotRebuilt(t, "new .a file", filepath.Join(gopathInstallDir, "depBase.a"))
+ AssertRebuilt(t, "new .a file", filepath.Join(gopathInstallDir, "libdepBase.so"))
+ })
}
-func appendFile(path, content string) {
+func appendFile(t *testing.T, path, content string) {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0660)
if err != nil {
- log.Fatalf("os.OpenFile failed: %v", err)
+ t.Fatalf("os.OpenFile failed: %v", err)
}
defer func() {
err := f.Close()
if err != nil {
- log.Fatalf("f.Close failed: %v", err)
+ t.Fatalf("f.Close failed: %v", err)
}
}()
_, err = f.WriteString(content)
if err != nil {
- log.Fatalf("f.WriteString failed: %v", err)
+ t.Fatalf("f.WriteString failed: %v", err)
}
}
-func writeFile(path, content string) {
+func writeFile(t *testing.T, path, content string) {
err := ioutil.WriteFile(path, []byte(content), 0644)
if err != nil {
- log.Fatalf("ioutil.WriteFile failed: %v", err)
+ t.Fatalf("ioutil.WriteFile failed: %v", err)
}
}
@@ -780,7 +840,7 @@ func TestABIChecking(t *testing.T) {
// some senses but suffices for the narrow definition of ABI compatibility the
// toolchain uses today.
resetFileStamps()
- appendFile("src/depBase/dep.go", "func ABIBreak() {}\n")
+ appendFile(t, "src/depBase/dep.go", "func ABIBreak() {}\n")
goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase")
c := exec.Command("./bin/exe")
output, err := c.CombinedOutput()
@@ -811,7 +871,7 @@ func TestABIChecking(t *testing.T) {
// function) and rebuild libdepBase.so, exe still works, even if new function
// is in a file by itself.
resetFileStamps()
- writeFile("src/depBase/dep2.go", "package depBase\nfunc noABIBreak() {}\n")
+ writeFile(t, "src/depBase/dep2.go", "package depBase\nfunc noABIBreak() {}\n")
goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase")
run(t, "after non-ABI breaking change", "./bin/exe")
}
@@ -838,3 +898,12 @@ func TestInterface(t *testing.T) {
goCmd(t, "install", "-linkshared", "iface")
run(t, "running type/itab uniqueness tester", "./bin/iface")
}
+
+// Access a global variable from a library.
+func TestGlobal(t *testing.T) {
+ goCmd(t, "install", "-buildmode=shared", "-linkshared", "globallib")
+ goCmd(t, "install", "-linkshared", "global")
+ run(t, "global executable", "./bin/global")
+ AssertIsLinkedTo(t, "./bin/global", soname)
+ AssertHasRPath(t, "./bin/global", gorootInstallDir)
+}
diff --git a/misc/cgo/testshared/src/depBase/dep.go b/misc/cgo/testshared/src/depBase/dep.go
index 9f86710db01..569c210aa14 100644
--- a/misc/cgo/testshared/src/depBase/dep.go
+++ b/misc/cgo/testshared/src/depBase/dep.go
@@ -22,7 +22,7 @@ type Dep struct {
func (d *Dep) Method() int {
// This code below causes various go.itab.* symbols to be generated in
// the shared library. Similar code in ../exe/exe.go results in
- // exercising https://github.com/golang/go/issues/17594
+ // exercising https://golang.org/issues/17594
reflect.TypeOf(os.Stdout).Elem()
return 10
}
diff --git a/misc/cgo/testshared/src/division/division.go b/misc/cgo/testshared/src/division/division.go
index a0b11a55e22..bb5fc984602 100644
--- a/misc/cgo/testshared/src/division/division.go
+++ b/misc/cgo/testshared/src/division/division.go
@@ -14,4 +14,4 @@ func main() {
if a != 8 {
panic("FAIL")
}
-}
\ No newline at end of file
+}
diff --git a/misc/cgo/testshared/src/exe/exe.go b/misc/cgo/testshared/src/exe/exe.go
index 84302a811f0..bd864d88ad8 100644
--- a/misc/cgo/testshared/src/exe/exe.go
+++ b/misc/cgo/testshared/src/exe/exe.go
@@ -25,7 +25,7 @@ func main() {
defer depBase.ImplementedInAsm()
// This code below causes various go.itab.* symbols to be generated in
// the executable. Similar code in ../depBase/dep.go results in
- // exercising https://github.com/golang/go/issues/17594
+ // exercising https://golang.org/issues/17594
reflect.TypeOf(os.Stdout).Elem()
runtime.GC()
depBase.V = depBase.F() + 1
diff --git a/misc/cgo/testshared/src/global/main.go b/misc/cgo/testshared/src/global/main.go
new file mode 100644
index 00000000000..94e7f247dee
--- /dev/null
+++ b/misc/cgo/testshared/src/global/main.go
@@ -0,0 +1,71 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "globallib"
+)
+
+//go:noinline
+func testLoop() {
+ for i, s := range globallib.Data {
+ if s != int64(i) {
+ panic("testLoop: mismatch")
+ }
+ }
+}
+
+//go:noinline
+func ptrData() *[1<<20 + 10]int64 {
+ return &globallib.Data
+}
+
+//go:noinline
+func testMediumOffset() {
+ for i, s := range globallib.Data[1<<16-2:] {
+ if s != int64(i)+1<<16-2 {
+ panic("testMediumOffset: index mismatch")
+ }
+ }
+
+ x := globallib.Data[1<<16-1]
+ if x != 1<<16-1 {
+ panic("testMediumOffset: direct mismatch")
+ }
+
+ y := &globallib.Data[1<<16-3]
+ if y != &ptrData()[1<<16-3] {
+ panic("testMediumOffset: address mismatch")
+ }
+}
+
+//go:noinline
+func testLargeOffset() {
+ for i, s := range globallib.Data[1<<20:] {
+ if s != int64(i)+1<<20 {
+ panic("testLargeOffset: index mismatch")
+ }
+ }
+
+ x := globallib.Data[1<<20+1]
+ if x != 1<<20+1 {
+ panic("testLargeOffset: direct mismatch")
+ }
+
+ y := &globallib.Data[1<<20+2]
+ if y != &ptrData()[1<<20+2] {
+ panic("testLargeOffset: address mismatch")
+ }
+}
+
+func main() {
+ testLoop()
+
+ // SSA rules commonly merge offsets into addresses. These
+ // tests access global data in different ways to try
+ // and exercise different SSA rules.
+ testMediumOffset()
+ testLargeOffset()
+}
diff --git a/misc/cgo/testshared/src/globallib/global.go b/misc/cgo/testshared/src/globallib/global.go
new file mode 100644
index 00000000000..b4372a2e9e2
--- /dev/null
+++ b/misc/cgo/testshared/src/globallib/global.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package globallib
+
+// Data is large enough that offsets into it do not fit into
+// 16-bit or 20-bit immediates. Ideally we'd also try and overrun
+// 32-bit immediates, but that requires the test machine to have
+// too much memory.
+var Data [1<<20 + 10]int64
+
+func init() {
+ for i := range Data {
+ Data[i] = int64(i)
+ }
+}
diff --git a/misc/ios/go_darwin_arm_exec.go b/misc/ios/go_darwin_arm_exec.go
index e84e513f933..56dbb009a18 100644
--- a/misc/ios/go_darwin_arm_exec.go
+++ b/misc/ios/go_darwin_arm_exec.go
@@ -49,6 +49,7 @@ var (
appID string
teamID string
bundleID string
+ deviceID string
)
// lock is a file lock to serialize iOS runs. It is global to avoid the
@@ -77,6 +78,9 @@ func main() {
// https://developer.apple.com/membercenter/index.action#accountSummary as Team ID.
teamID = getenv("GOIOS_TEAM_ID")
+ // Device IDs as listed with ios-deploy -c.
+ deviceID = os.Getenv("GOIOS_DEVICE_ID")
+
parts := strings.SplitN(appID, ".", 2)
// For compatibility with the old builders, use a fallback bundle ID
bundleID = "golang.gotest"
@@ -96,7 +100,7 @@ func main() {
//
// The lock file is never deleted, to avoid concurrent locks on distinct
// files with the same path.
- lockName := filepath.Join(os.TempDir(), "go_darwin_arm_exec.lock")
+ lockName := filepath.Join(os.TempDir(), "go_darwin_arm_exec-"+deviceID+".lock")
lock, err = os.OpenFile(lockName, os.O_CREATE|os.O_RDONLY, 0666)
if err != nil {
log.Fatal(err)
@@ -228,6 +232,16 @@ func run(bin string, args []string) (err error) {
os.Stdout.Write(b)
}()
+ cond := func(out *buf) bool {
+ i0 := s.out.LastIndex([]byte("(lldb)"))
+ i1 := s.out.LastIndex([]byte("fruitstrap"))
+ i2 := s.out.LastIndex([]byte(" connect"))
+ return i0 > 0 && i1 > 0 && i2 > 0
+ }
+ if err := s.wait("lldb start", cond, 15*time.Second); err != nil {
+ panic(waitPanic{err})
+ }
+
// Script LLDB. Oh dear.
s.do(`process handle SIGHUP --stop false --pass true --notify false`)
s.do(`process handle SIGPIPE --stop false --pass true --notify false`)
@@ -294,7 +308,7 @@ func newSession(appdir string, args []string, opts options) (*lldbSession, error
if err != nil {
return nil, err
}
- s.cmd = exec.Command(
+ cmdArgs := []string{
// lldb tries to be clever with terminals.
// So we wrap it in script(1) and be clever
// right back at it.
@@ -307,9 +321,13 @@ func newSession(appdir string, args []string, opts options) (*lldbSession, error
"-u",
"-r",
"-n",
- `--args=`+strings.Join(args, " ")+``,
+ `--args=` + strings.Join(args, " ") + ``,
"--bundle", appdir,
- )
+ }
+ if deviceID != "" {
+ cmdArgs = append(cmdArgs, "--id", deviceID)
+ }
+ s.cmd = exec.Command(cmdArgs[0], cmdArgs[1:]...)
if debug {
log.Println(strings.Join(s.cmd.Args, " "))
}
@@ -340,15 +358,6 @@ func newSession(appdir string, args []string, opts options) (*lldbSession, error
s.exited <- s.cmd.Wait()
}()
- cond := func(out *buf) bool {
- i0 := s.out.LastIndex([]byte("(lldb)"))
- i1 := s.out.LastIndex([]byte("fruitstrap"))
- i2 := s.out.LastIndex([]byte(" connect"))
- return i0 > 0 && i1 > 0 && i2 > 0
- }
- if err := s.wait("lldb start", cond, 15*time.Second); err != nil {
- panic(waitPanic{err})
- }
return s, nil
}
@@ -377,6 +386,9 @@ func (s *lldbSession) wait(reason string, cond func(out *buf) bool, extraTimeout
}
return fmt.Errorf("test timeout (%s)", reason)
case <-doTimedout:
+ if p := s.cmd.Process; p != nil {
+ p.Kill()
+ }
return fmt.Errorf("command timeout (%s for %v)", reason, doTimeout)
case err := <-s.exited:
return fmt.Errorf("exited (%s: %v)", reason, err)
diff --git a/misc/nacl/testzip.proto b/misc/nacl/testzip.proto
index 8bf25400cbc..f15a2ab2246 100644
--- a/misc/nacl/testzip.proto
+++ b/misc/nacl/testzip.proto
@@ -22,6 +22,9 @@ go src=..
internal
syntax
parser.go
+ cover
+ testdata
+ +
doc
main.go
pkg.go
@@ -31,6 +34,9 @@ go src=..
internal
objfile
objfile.go
+ buildid
+ testdata
+ +
gofmt
gofmt.go
gofmt_test.go
@@ -64,6 +70,10 @@ go src=..
armasm
testdata
+
+ arm64
+ arm64asm
+ testdata
+ +
x86
x86asm
testdata
diff --git a/misc/swig/stdio/file.go b/misc/swig/stdio/file.go
new file mode 100644
index 00000000000..a582f776f6c
--- /dev/null
+++ b/misc/swig/stdio/file.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is here just to cause problems.
+// file.swig turns into a file also named file.go.
+// Make sure cmd/go keeps them separate
+// when both are passed to cgo.
+
+package file
+
+//int F(void) { return 1; }
+import "C"
+
+func F() int { return int(C.F()) }
diff --git a/misc/swig/stdio/file_test.go b/misc/swig/stdio/file_test.go
index b1a520e6bc9..aea92aafd55 100644
--- a/misc/swig/stdio/file_test.go
+++ b/misc/swig/stdio/file_test.go
@@ -20,3 +20,9 @@ func TestRead(t *testing.T) {
t.Error("fclose failed")
}
}
+
+func TestF(t *testing.T) {
+ if x := F(); x != 1 {
+ t.Fatalf("x = %d, want 1", x)
+ }
+}
diff --git a/misc/trace/trace_viewer_full.html b/misc/trace/trace_viewer_full.html
new file mode 100644
index 00000000000..f0d2e60b185
--- /dev/null
+++ b/misc/trace/trace_viewer_full.html
@@ -0,0 +1,9525 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ ✕
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go
index d49c5c3fd9e..4a2c173bf3a 100644
--- a/src/archive/tar/common.go
+++ b/src/archive/tar/common.go
@@ -3,20 +3,22 @@
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
-// It aims to cover most of the variations, including those produced
-// by GNU and BSD tars.
//
-// References:
-// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
-// http://www.gnu.org/software/tar/manual/html_node/Standard.html
-// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+// Tape archives (tar) are a file format for storing a sequence of files that
+// can be read and written in a streaming manner.
+// This package aims to cover most variations of the format,
+// including those produced by GNU and BSD tar tools.
package tar
import (
"errors"
"fmt"
+ "math"
"os"
"path"
+ "reflect"
+ "strconv"
+ "strings"
"time"
)
@@ -24,42 +26,500 @@ import (
// architectures. If a large value is encountered when decoding, the result
// stored in Header will be the truncated version.
-// Header type flags.
-const (
- TypeReg = '0' // regular file
- TypeRegA = '\x00' // regular file
- TypeLink = '1' // hard link
- TypeSymlink = '2' // symbolic link
- TypeChar = '3' // character device node
- TypeBlock = '4' // block device node
- TypeDir = '5' // directory
- TypeFifo = '6' // fifo node
- TypeCont = '7' // reserved
- TypeXHeader = 'x' // extended header
- TypeXGlobalHeader = 'g' // global extended header
- TypeGNULongName = 'L' // Next file has a long name
- TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
- TypeGNUSparse = 'S' // sparse file
+var (
+ ErrHeader = errors.New("archive/tar: invalid tar header")
+ ErrWriteTooLong = errors.New("archive/tar: write too long")
+ ErrFieldTooLong = errors.New("archive/tar: header field too long")
+ ErrWriteAfterClose = errors.New("archive/tar: write after close")
+ errMissData = errors.New("archive/tar: sparse file references non-existent data")
+ errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
+ errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
)
+type headerError []string
+
+func (he headerError) Error() string {
+ const prefix = "archive/tar: cannot encode header"
+ var ss []string
+ for _, s := range he {
+ if s != "" {
+ ss = append(ss, s)
+ }
+ }
+ if len(ss) == 0 {
+ return prefix
+ }
+ return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and "))
+}
+
+// Type flags for Header.Typeflag.
+const (
+ // Type '0' indicates a regular file.
+ TypeReg = '0'
+ TypeRegA = '\x00' // For legacy support; use TypeReg instead
+
+ // Type '1' to '6' are header-only flags and may not have a data body.
+ TypeLink = '1' // Hard link
+ TypeSymlink = '2' // Symbolic link
+ TypeChar = '3' // Character device node
+ TypeBlock = '4' // Block device node
+ TypeDir = '5' // Directory
+ TypeFifo = '6' // FIFO node
+
+ // Type '7' is reserved.
+ TypeCont = '7'
+
+ // Type 'x' is used by the PAX format to store key-value records that
+ // are only relevant to the next file.
+ // This package transparently handles these types.
+ TypeXHeader = 'x'
+
+ // Type 'g' is used by the PAX format to store key-value records that
+ // are relevant to all subsequent files.
+ // This package only supports parsing and composing such headers,
+ // but does not currently support persisting the global state across files.
+ TypeXGlobalHeader = 'g'
+
+ // Type 'S' indicates a sparse file in the GNU format.
+ TypeGNUSparse = 'S'
+
+ // Types 'L' and 'K' are used by the GNU format for a meta file
+ // used to store the path or link name for the next file.
+ // This package transparently handles these types.
+ TypeGNULongName = 'L'
+ TypeGNULongLink = 'K'
+)
+
+// Keywords for PAX extended header records.
+const (
+ paxNone = "" // Indicates that no PAX key is suitable
+ paxPath = "path"
+ paxLinkpath = "linkpath"
+ paxSize = "size"
+ paxUid = "uid"
+ paxGid = "gid"
+ paxUname = "uname"
+ paxGname = "gname"
+ paxMtime = "mtime"
+ paxAtime = "atime"
+ paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid
+ paxCharset = "charset" // Currently unused
+ paxComment = "comment" // Currently unused
+
+ paxSchilyXattr = "SCHILY.xattr."
+
+ // Keywords for GNU sparse files in a PAX extended header.
+ paxGNUSparse = "GNU.sparse."
+ paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+ paxGNUSparseOffset = "GNU.sparse.offset"
+ paxGNUSparseNumBytes = "GNU.sparse.numbytes"
+ paxGNUSparseMap = "GNU.sparse.map"
+ paxGNUSparseName = "GNU.sparse.name"
+ paxGNUSparseMajor = "GNU.sparse.major"
+ paxGNUSparseMinor = "GNU.sparse.minor"
+ paxGNUSparseSize = "GNU.sparse.size"
+ paxGNUSparseRealSize = "GNU.sparse.realsize"
+)
+
+// basicKeys is a set of the PAX keys for which we have built-in support.
+// This does not contain "charset" or "comment", which are both PAX-specific,
+// so adding them as first-class features of Header is unlikely.
+// Users can use the PAXRecords field to set it themselves.
+var basicKeys = map[string]bool{
+ paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true,
+ paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true,
+}
+
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
+//
+// For forward compatibility, users that retrieve a Header from Reader.Next,
+// mutate it in some ways, and then pass it back to Writer.WriteHeader
+// should do so by creating a new Header and copying the fields
+// that they are interested in preserving.
type Header struct {
- Name string // name of header file entry
- Mode int64 // permission and mode bits
- Uid int // user id of owner
- Gid int // group id of owner
- Size int64 // length in bytes
- ModTime time.Time // modified time
- Typeflag byte // type of header entry
- Linkname string // target name of link
- Uname string // user name of owner
- Gname string // group name of owner
- Devmajor int64 // major number of character or block device
- Devminor int64 // minor number of character or block device
- AccessTime time.Time // access time
- ChangeTime time.Time // status change time
- Xattrs map[string]string
+ Typeflag byte // Type of header entry (should be TypeReg for most files)
+
+ Name string // Name of file entry
+ Linkname string // Target name of link (valid for TypeLink or TypeSymlink)
+
+ Size int64 // Logical file size in bytes
+ Mode int64 // Permission and mode bits
+ Uid int // User ID of owner
+ Gid int // Group ID of owner
+ Uname string // User name of owner
+ Gname string // Group name of owner
+
+ // If the Format is unspecified, then Writer.WriteHeader rounds ModTime
+ // to the nearest second and ignores the AccessTime and ChangeTime fields.
+ //
+ // To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
+ // To use sub-second resolution, specify the Format as PAX.
+ ModTime time.Time // Modification time
+ AccessTime time.Time // Access time (requires either PAX or GNU support)
+ ChangeTime time.Time // Change time (requires either PAX or GNU support)
+
+ Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
+ Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)
+
+ // Xattrs stores extended attributes as PAX records under the
+ // "SCHILY.xattr." namespace.
+ //
+ // The following are semantically equivalent:
+ // h.Xattrs[key] = value
+ // h.PAXRecords["SCHILY.xattr."+key] = value
+ //
+ // When Writer.WriteHeader is called, the contents of Xattrs will take
+ // precedence over those in PAXRecords.
+ //
+ // Deprecated: Use PAXRecords instead.
+ Xattrs map[string]string
+
+ // PAXRecords is a map of PAX extended header records.
+ //
+ // User-defined records should have keys of the following form:
+ // VENDOR.keyword
+ // Where VENDOR is some namespace in all uppercase, and keyword may
+ // not contain the '=' character (e.g., "GOLANG.pkg.version").
+ // The key and value should be non-empty UTF-8 strings.
+ //
+ // When Writer.WriteHeader is called, PAX records derived from the
+ // other fields in Header take precedence over PAXRecords.
+ PAXRecords map[string]string
+
+ // Format specifies the format of the tar header.
+ //
+ // This is set by Reader.Next as a best-effort guess at the format.
+ // Since the Reader liberally reads some non-compliant files,
+ // it is possible for this to be FormatUnknown.
+ //
+ // If the format is unspecified when Writer.WriteHeader is called,
+ // then it uses the first format (in the order of USTAR, PAX, GNU)
+ // capable of encoding this Header (see Format).
+ Format Format
+}
+
+// sparseEntry represents a Length-sized fragment at Offset in the file.
+type sparseEntry struct{ Offset, Length int64 }
+
+func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
+
+// A sparse file can be represented as either a sparseDatas or a sparseHoles.
+// As long as the total size is known, they are equivalent and one can be
+// converted to the other form and back. The various tar formats with sparse
+// file support represent sparse files in the sparseDatas form. That is, they
+// specify the fragments in the file that has data, and treat everything else as
+// having zero bytes. As such, the encoding and decoding logic in this package
+// deals with sparseDatas.
+//
+// However, the external API uses sparseHoles instead of sparseDatas because the
+// zero value of sparseHoles logically represents a normal file (i.e., there are
+// no holes in it). On the other hand, the zero value of sparseDatas implies
+// that the file has no data in it, which is rather odd.
+//
+ // As an example, if the underlying raw file contains the 8-byte data:
+// var compactFile = "abcdefgh"
+//
+// And the sparse map has the following entries:
+// var spd sparseDatas = []sparseEntry{
+// {Offset: 2, Length: 5}, // Data fragment for 2..6
+// {Offset: 18, Length: 3}, // Data fragment for 18..20
+// }
+// var sph sparseHoles = []sparseEntry{
+// {Offset: 0, Length: 2}, // Hole fragment for 0..1
+// {Offset: 7, Length: 11}, // Hole fragment for 7..17
+// {Offset: 21, Length: 4}, // Hole fragment for 21..24
+// }
+//
+// Then the content of the resulting sparse file with a Header.Size of 25 is:
+// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type (
+ sparseDatas []sparseEntry
+ sparseHoles []sparseEntry
+)
+
+// validateSparseEntries reports whether sp is a valid sparse map.
+// It does not matter whether sp represents data fragments or hole fragments.
+func validateSparseEntries(sp []sparseEntry, size int64) bool {
+ // Validate all sparse entries. These are the same checks as performed by
+ // the BSD tar utility.
+ if size < 0 {
+ return false
+ }
+ var pre sparseEntry
+ for _, cur := range sp {
+ switch {
+ case cur.Offset < 0 || cur.Length < 0:
+ return false // Negative values are never okay
+ case cur.Offset > math.MaxInt64-cur.Length:
+ return false // Integer overflow with large length
+ case cur.endOffset() > size:
+ return false // Region extends beyond the actual size
+ case pre.endOffset() > cur.Offset:
+ return false // Regions cannot overlap and must be in order
+ }
+ pre = cur
+ }
+ return true
+}
+
+// alignSparseEntries mutates src and returns dst where each fragment's
+// starting offset is aligned up to the nearest block edge, and each
+// ending offset is aligned down to the nearest block edge.
+//
+// Even though the Go tar Reader and the BSD tar utility can handle entries
+// with arbitrary offsets and lengths, the GNU tar utility can only handle
+// offsets and lengths that are multiples of blockSize.
+func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
+ dst := src[:0]
+ for _, s := range src {
+ pos, end := s.Offset, s.endOffset()
+ pos += blockPadding(+pos) // Round-up to nearest blockSize
+ if end != size {
+ end -= blockPadding(-end) // Round-down to nearest blockSize
+ }
+ if pos < end {
+ dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
+ }
+ }
+ return dst
+}
+
+// invertSparseEntries converts a sparse map from one form to the other.
+// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
+// The input must have been already validated.
+//
+// This function mutates src and returns a normalized map where:
+// * adjacent fragments are coalesced together
+// * only the last fragment may be empty
+// * the endOffset of the last fragment is the total size
+func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
+ dst := src[:0]
+ var pre sparseEntry
+ for _, cur := range src {
+ if cur.Length == 0 {
+ continue // Skip empty fragments
+ }
+ pre.Length = cur.Offset - pre.Offset
+ if pre.Length > 0 {
+ dst = append(dst, pre) // Only add non-empty fragments
+ }
+ pre.Offset = cur.endOffset()
+ }
+ pre.Length = size - pre.Offset // Possibly the only empty fragment
+ return append(dst, pre)
+}
+
+// fileState tracks the number of logical (includes sparse holes) and physical
+// (actual in tar archive) bytes remaining for the current file.
+//
+// Invariant: LogicalRemaining >= PhysicalRemaining
+type fileState interface {
+ LogicalRemaining() int64
+ PhysicalRemaining() int64
+}
+
+// allowedFormats determines which formats can be used.
+// The value returned is the logical OR of multiple possible formats.
+// If the value is FormatUnknown, then the input Header cannot be encoded
+// and an error is returned explaining why.
+//
+// As a by-product of checking the fields, this function returns paxHdrs, which
+// contain all fields that could not be directly encoded.
+// A value receiver ensures that this method does not mutate the source Header.
+func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) {
+ format = FormatUSTAR | FormatPAX | FormatGNU
+ paxHdrs = make(map[string]string)
+
+ var whyNoUSTAR, whyNoPAX, whyNoGNU string
+ var preferPAX bool // Prefer PAX over USTAR
+ verifyString := func(s string, size int, name, paxKey string) {
+ // NUL-terminator is optional for path and linkpath.
+ // Technically, it is required for uname and gname,
+ // but neither GNU nor BSD tar checks for it.
+ tooLong := len(s) > size
+ allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath
+ if hasNUL(s) || (tooLong && !allowLongGNU) {
+ whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s)
+ format.mustNotBe(FormatGNU)
+ }
+ if !isASCII(s) || tooLong {
+ canSplitUSTAR := paxKey == paxPath
+ if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok {
+ whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s)
+ format.mustNotBe(FormatUSTAR)
+ }
+ if paxKey == paxNone {
+ whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s)
+ format.mustNotBe(FormatPAX)
+ } else {
+ paxHdrs[paxKey] = s
+ }
+ }
+ if v, ok := h.PAXRecords[paxKey]; ok && v == s {
+ paxHdrs[paxKey] = v
+ }
+ }
+ verifyNumeric := func(n int64, size int, name, paxKey string) {
+ if !fitsInBase256(size, n) {
+ whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n)
+ format.mustNotBe(FormatGNU)
+ }
+ if !fitsInOctal(size, n) {
+ whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n)
+ format.mustNotBe(FormatUSTAR)
+ if paxKey == paxNone {
+ whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n)
+ format.mustNotBe(FormatPAX)
+ } else {
+ paxHdrs[paxKey] = strconv.FormatInt(n, 10)
+ }
+ }
+ if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) {
+ paxHdrs[paxKey] = v
+ }
+ }
+ verifyTime := func(ts time.Time, size int, name, paxKey string) {
+ if ts.IsZero() {
+ return // Always okay
+ }
+ if !fitsInBase256(size, ts.Unix()) {
+ whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts)
+ format.mustNotBe(FormatGNU)
+ }
+ isMtime := paxKey == paxMtime
+ fitsOctal := fitsInOctal(size, ts.Unix())
+ if (isMtime && !fitsOctal) || !isMtime {
+ whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts)
+ format.mustNotBe(FormatUSTAR)
+ }
+ needsNano := ts.Nanosecond() != 0
+ if !isMtime || !fitsOctal || needsNano {
+ preferPAX = true // USTAR may truncate sub-second measurements
+ if paxKey == paxNone {
+ whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts)
+ format.mustNotBe(FormatPAX)
+ } else {
+ paxHdrs[paxKey] = formatPAXTime(ts)
+ }
+ }
+ if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) {
+ paxHdrs[paxKey] = v
+ }
+ }
+
+ // Check basic fields.
+ var blk block
+ v7 := blk.V7()
+ ustar := blk.USTAR()
+ gnu := blk.GNU()
+ verifyString(h.Name, len(v7.Name()), "Name", paxPath)
+ verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath)
+ verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname)
+ verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname)
+ verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone)
+ verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid)
+ verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid)
+ verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize)
+ verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone)
+ verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone)
+ verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime)
+ verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime)
+ verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime)
+
+ // Check for header-only types.
+ var whyOnlyPAX, whyOnlyGNU string
+ switch h.Typeflag {
+ case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse:
+ // Exclude TypeLink and TypeSymlink, since they may reference directories.
+ if strings.HasSuffix(h.Name, "/") {
+ return FormatUnknown, nil, headerError{"filename may not have trailing slash"}
+ }
+ case TypeXHeader, TypeGNULongName, TypeGNULongLink:
+ return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"}
+ case TypeXGlobalHeader:
+ h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}
+ if !reflect.DeepEqual(h, h2) {
+ return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"}
+ }
+ whyOnlyPAX = "only PAX supports TypeXGlobalHeader"
+ format.mayOnlyBe(FormatPAX)
+ }
+ if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 {
+ return FormatUnknown, nil, headerError{"negative size on header-only type"}
+ }
+
+ // Check PAX records.
+ if len(h.Xattrs) > 0 {
+ for k, v := range h.Xattrs {
+ paxHdrs[paxSchilyXattr+k] = v
+ }
+ whyOnlyPAX = "only PAX supports Xattrs"
+ format.mayOnlyBe(FormatPAX)
+ }
+ if len(h.PAXRecords) > 0 {
+ for k, v := range h.PAXRecords {
+ switch _, exists := paxHdrs[k]; {
+ case exists:
+ continue // Do not overwrite existing records
+ case h.Typeflag == TypeXGlobalHeader:
+ paxHdrs[k] = v // Copy all records
+ case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse):
+ paxHdrs[k] = v // Ignore local records that may conflict
+ }
+ }
+ whyOnlyPAX = "only PAX supports PAXRecords"
+ format.mayOnlyBe(FormatPAX)
+ }
+ for k, v := range paxHdrs {
+ if !validPAXRecord(k, v) {
+ return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)}
+ }
+ }
+
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ // Check sparse files.
+ if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
+ if isHeaderOnlyType(h.Typeflag) {
+ return FormatUnknown, nil, headerError{"header-only type cannot be sparse"}
+ }
+ if !validateSparseEntries(h.SparseHoles, h.Size) {
+ return FormatUnknown, nil, headerError{"invalid sparse holes"}
+ }
+ if h.Typeflag == TypeGNUSparse {
+ whyOnlyGNU = "only GNU supports TypeGNUSparse"
+ format.mayOnlyBe(FormatGNU)
+ } else {
+ whyNoGNU = "GNU supports sparse files only with TypeGNUSparse"
+ format.mustNotBe(FormatGNU)
+ }
+ whyNoUSTAR = "USTAR does not support sparse files"
+ format.mustNotBe(FormatUSTAR)
+ }
+ */
+
+ // Check desired format.
+ if wantFormat := h.Format; wantFormat != FormatUnknown {
+ if wantFormat.has(FormatPAX) && !preferPAX {
+ wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
+ }
+ format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
+ }
+ if format == FormatUnknown {
+ switch h.Format {
+ case FormatUSTAR:
+ err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU}
+ case FormatPAX:
+ err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU}
+ case FormatGNU:
+ err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX}
+ default:
+ err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU}
+ }
+ }
+ return format, paxHdrs, err
}
// FileInfo returns an os.FileInfo for the Header.
@@ -92,63 +552,43 @@ func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
- // setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
- // setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
- // sticky
mode |= os.ModeSticky
}
- // Set file mode bits.
- // clear perm, setuid, setgid and sticky bits.
- m := os.FileMode(fi.h.Mode) &^ 07777
- if m == c_ISDIR {
- // directory
+ // Set file mode bits; clear perm, setuid, setgid, and sticky bits.
+ switch m := os.FileMode(fi.h.Mode) &^ 07777; m {
+ case c_ISDIR:
mode |= os.ModeDir
- }
- if m == c_ISFIFO {
- // named pipe (FIFO)
+ case c_ISFIFO:
mode |= os.ModeNamedPipe
- }
- if m == c_ISLNK {
- // symbolic link
+ case c_ISLNK:
mode |= os.ModeSymlink
- }
- if m == c_ISBLK {
- // device file
+ case c_ISBLK:
mode |= os.ModeDevice
- }
- if m == c_ISCHR {
- // Unix character device
+ case c_ISCHR:
mode |= os.ModeDevice
mode |= os.ModeCharDevice
- }
- if m == c_ISSOCK {
- // Unix domain socket
+ case c_ISSOCK:
mode |= os.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
- // symbolic link
mode |= os.ModeSymlink
case TypeChar:
- // character device node
mode |= os.ModeDevice
mode |= os.ModeCharDevice
case TypeBlock:
- // block device node
mode |= os.ModeDevice
case TypeDir:
- // directory
mode |= os.ModeDir
case TypeFifo:
- // fifo node
mode |= os.ModeNamedPipe
}
@@ -176,33 +616,16 @@ const (
c_ISSOCK = 0140000 // Socket
)
-// Keywords for the PAX Extended Header
-const (
- paxAtime = "atime"
- paxCharset = "charset"
- paxComment = "comment"
- paxCtime = "ctime" // please note that ctime is not a valid pax header.
- paxGid = "gid"
- paxGname = "gname"
- paxLinkpath = "linkpath"
- paxMtime = "mtime"
- paxPath = "path"
- paxSize = "size"
- paxUid = "uid"
- paxUname = "uname"
- paxXattr = "SCHILY.xattr."
- paxNone = ""
-)
-
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
-// Because os.FileInfo's Name method returns only the base name of
-// the file it describes, it may be necessary to modify the Name field
-// of the returned header to provide the full path name of the file.
+//
+// Since os.FileInfo's Name method only returns the base name of
+// the file it describes, it may be necessary to modify Header.Name
+// to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
- return nil, errors.New("tar: FileInfo is nil")
+ return nil, errors.New("archive/tar: FileInfo is nil")
}
fm := fi.Mode()
h := &Header{
@@ -265,6 +688,12 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
h.Size = 0
h.Linkname = sys.Linkname
}
+ if sys.PAXRecords != nil {
+ h.PAXRecords = make(map[string]string)
+ for k, v := range sys.PAXRecords {
+ h.PAXRecords[k] = v
+ }
+ }
}
if sysStat != nil {
return h, sysStat(fi, h)
@@ -282,3 +711,10 @@ func isHeaderOnlyType(flag byte) bool {
return false
}
}
+
+func min(a, b int64) int64 {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/src/archive/tar/example_test.go b/src/archive/tar/example_test.go
index 5f0ce2f4029..a2474b959f6 100644
--- a/src/archive/tar/example_test.go
+++ b/src/archive/tar/example_test.go
@@ -13,14 +13,10 @@ import (
"os"
)
-func Example() {
- // Create a buffer to write our archive to.
- buf := new(bytes.Buffer)
-
- // Create a new tar archive.
- tw := tar.NewWriter(buf)
-
- // Add some files to the archive.
+func Example_minimal() {
+ // Create and add some files to the archive.
+ var buf bytes.Buffer
+ tw := tar.NewWriter(&buf)
var files = []struct {
Name, Body string
}{
@@ -35,34 +31,29 @@ func Example() {
Size: int64(len(file.Body)),
}
if err := tw.WriteHeader(hdr); err != nil {
- log.Fatalln(err)
+ log.Fatal(err)
}
if _, err := tw.Write([]byte(file.Body)); err != nil {
- log.Fatalln(err)
+ log.Fatal(err)
}
}
- // Make sure to check the error on Close.
if err := tw.Close(); err != nil {
- log.Fatalln(err)
+ log.Fatal(err)
}
- // Open the tar archive for reading.
- r := bytes.NewReader(buf.Bytes())
- tr := tar.NewReader(r)
-
- // Iterate through the files in the archive.
+ // Open and iterate through the files in the archive.
+ tr := tar.NewReader(&buf)
for {
hdr, err := tr.Next()
if err == io.EOF {
- // end of tar archive
- break
+ break // End of archive
}
if err != nil {
- log.Fatalln(err)
+ log.Fatal(err)
}
fmt.Printf("Contents of %s:\n", hdr.Name)
if _, err := io.Copy(os.Stdout, tr); err != nil {
- log.Fatalln(err)
+ log.Fatal(err)
}
fmt.Println()
}
diff --git a/src/archive/tar/format.go b/src/archive/tar/format.go
index c2c9910d002..6e29698a14a 100644
--- a/src/archive/tar/format.go
+++ b/src/archive/tar/format.go
@@ -4,38 +4,133 @@
package tar
+import "strings"
+
+// Format represents the tar archive format.
+//
+// The original tar format was introduced in Unix V7.
+// Since then, there have been multiple competing formats attempting to
+// standardize or extend the V7 format to overcome its limitations.
+// The most common formats are the USTAR, PAX, and GNU formats,
+// each with their own advantages and limitations.
+//
+// The following table captures the capabilities of each format:
+//
+// | USTAR | PAX | GNU
+// ------------------+--------+-----------+----------
+// Name | 256B | unlimited | unlimited
+// Linkname | 100B | unlimited | unlimited
+// Size | uint33 | unlimited | uint89
+// Mode | uint21 | uint21 | uint57
+// Uid/Gid | uint21 | unlimited | uint57
+// Uname/Gname | 32B | unlimited | 32B
+// ModTime | uint33 | unlimited | int89
+// AccessTime | n/a | unlimited | int89
+// ChangeTime | n/a | unlimited | int89
+// Devmajor/Devminor | uint21 | uint21 | uint57
+// ------------------+--------+-----------+----------
+// string encoding | ASCII | UTF-8 | binary
+// sub-second times | no | yes | no
+// sparse files | no | yes | yes
+//
+// The table's upper portion shows the Header fields, where each format reports
+// the maximum number of bytes allowed for each string field and
+// the integer type used to store each numeric field
+// (where timestamps are stored as the number of seconds since the Unix epoch).
+//
+// The table's lower portion shows specialized features of each format,
+// such as supported string encodings, support for sub-second timestamps,
+// or support for sparse files.
+//
+// The Writer currently provides no support for sparse files.
+type Format int
+
// Constants to identify various tar formats.
const (
- // The format is unknown.
- formatUnknown = (1 << iota) / 2 // Sequence of 0, 1, 2, 4, 8, etc...
+ // Deliberately hide the meaning of constants from public API.
+ _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...
+
+ // FormatUnknown indicates that the format is unknown.
+ FormatUnknown
// The format of the original Unix V7 tar tool prior to standardization.
formatV7
- // The old and new GNU formats, which are incompatible with USTAR.
- // This does cover the old GNU sparse extension.
- // This does not cover the GNU sparse extensions using PAX headers,
- // versions 0.0, 0.1, and 1.0; these fall under the PAX format.
- formatGNU
+ // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
+ //
+ // While this format is compatible with most tar readers,
+ // the format has several limitations making it unsuitable for some usages.
+ // Most notably, it cannot support sparse files, files larger than 8GiB,
+ // filenames larger than 256 characters, and non-ASCII filenames.
+ //
+ // Reference:
+ // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
+ FormatUSTAR
+
+ // FormatPAX represents the PAX header format defined in POSIX.1-2001.
+ //
+ // PAX extends USTAR by writing a special file with Typeflag TypeXHeader
+ // preceding the original header. This file contains a set of key-value
+ // records, which are used to overcome USTAR's shortcomings, in addition to
+ // providing the ability to have sub-second resolution for timestamps.
+ //
+ // Some newer formats add their own extensions to PAX by defining their
+ // own keys and assigning certain semantic meaning to the associated values.
+ // For example, sparse file support in PAX is implemented using keys
+ // defined by the GNU manual (e.g., "GNU.sparse.map").
+ //
+ // Reference:
+ // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
+ FormatPAX
+
+ // FormatGNU represents the GNU header format.
+ //
+ // The GNU header format is older than the USTAR and PAX standards and
+ // is not compatible with them. The GNU format supports
+ // arbitrary file sizes, filenames of arbitrary encoding and length,
+ // sparse files, and other features.
+ //
+ // It is recommended that PAX be chosen over GNU unless the target
+ // application can only parse GNU formatted archives.
+ //
+ // Reference:
+ // http://www.gnu.org/software/tar/manual/html_node/Standard.html
+ FormatGNU
// Schily's tar format, which is incompatible with USTAR.
// This does not cover STAR extensions to the PAX format; these fall under
// the PAX format.
formatSTAR
- // USTAR is the former standardization of tar defined in POSIX.1-1988.
- // This is incompatible with the GNU and STAR formats.
- formatUSTAR
-
- // PAX is the latest standardization of tar defined in POSIX.1-2001.
- // This is an extension of USTAR and is "backwards compatible" with it.
- //
- // Some newer formats add their own extensions to PAX, such as GNU sparse
- // files and SCHILY extended attributes. Since they are backwards compatible
- // with PAX, they will be labelled as "PAX".
- formatPAX
+ formatMax
)
+func (f Format) has(f2 Format) bool { return f&f2 != 0 }
+func (f *Format) mayBe(f2 Format) { *f |= f2 }
+func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }
+func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }
+
+var formatNames = map[Format]string{
+ formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
+}
+
+func (f Format) String() string {
+ var ss []string
+ for f2 := Format(1); f2 < formatMax; f2 <<= 1 {
+ if f.has(f2) {
+ ss = append(ss, formatNames[f2])
+ }
+ }
+ switch len(ss) {
+ case 0:
+ return ""
+ case 1:
+ return ss[0]
+ default:
+ return "(" + strings.Join(ss, " | ") + ")"
+ }
+}
+
// Magics used to identify various formats.
const (
magicGNU, versionGNU = "ustar ", " \x00"
@@ -50,6 +145,12 @@ const (
prefixSize = 155 // Max length of the prefix field in USTAR format
)
+// blockPadding computes the number of bytes needed to pad offset up to the
+// nearest block edge where 0 <= n < blockSize.
+func blockPadding(offset int64) (n int64) {
+ return -offset & (blockSize - 1)
+}
+
var zeroBlock block
type block [blockSize]byte
@@ -63,14 +164,14 @@ func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) }
// GetFormat checks that the block is a valid tar header based on the checksum.
// It then attempts to guess the specific format based on magic values.
-// If the checksum fails, then formatUnknown is returned.
-func (b *block) GetFormat() (format int) {
+// If the checksum fails, then FormatUnknown is returned.
+func (b *block) GetFormat() Format {
// Verify checksum.
var p parser
value := p.parseOctal(b.V7().Chksum())
chksum1, chksum2 := b.ComputeChecksum()
if p.err != nil || (value != chksum1 && value != chksum2) {
- return formatUnknown
+ return FormatUnknown
}
// Guess the magic values.
@@ -81,9 +182,9 @@ func (b *block) GetFormat() (format int) {
case magic == magicUSTAR && trailer == trailerSTAR:
return formatSTAR
case magic == magicUSTAR:
- return formatUSTAR
+ return FormatUSTAR | FormatPAX
case magic == magicGNU && version == versionGNU:
- return formatGNU
+ return FormatGNU
default:
return formatV7
}
@@ -91,19 +192,19 @@ func (b *block) GetFormat() (format int) {
// SetFormat writes the magic values necessary for specified format
// and then updates the checksum accordingly.
-func (b *block) SetFormat(format int) {
+func (b *block) SetFormat(format Format) {
// Set the magic values.
- switch format {
- case formatV7:
+ switch {
+ case format.has(formatV7):
// Do nothing.
- case formatGNU:
+ case format.has(FormatGNU):
copy(b.GNU().Magic(), magicGNU)
copy(b.GNU().Version(), versionGNU)
- case formatSTAR:
+ case format.has(formatSTAR):
copy(b.STAR().Magic(), magicUSTAR)
copy(b.STAR().Version(), versionUSTAR)
copy(b.STAR().Trailer(), trailerSTAR)
- case formatUSTAR, formatPAX:
+ case format.has(FormatUSTAR | FormatPAX):
copy(b.USTAR().Magic(), magicUSTAR)
copy(b.USTAR().Version(), versionUSTAR)
default:
@@ -128,12 +229,17 @@ func (b *block) ComputeChecksum() (unsigned, signed int64) {
if 148 <= i && i < 156 {
c = ' ' // Treat the checksum field itself as all spaces.
}
- unsigned += int64(uint8(c))
+ unsigned += int64(c)
signed += int64(int8(c))
}
return unsigned, signed
}
+// Reset clears the block with all zeros.
+func (b *block) Reset() {
+ *b = block{}
+}
+
type headerV7 [blockSize]byte
func (h *headerV7) Name() []byte { return h[000:][:100] }
@@ -187,11 +293,11 @@ func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] }
type sparseArray []byte
-func (s sparseArray) Entry(i int) sparseNode { return (sparseNode)(s[i*24:]) }
+func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) }
func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] }
func (s sparseArray) MaxEntries() int { return len(s) / 24 }
-type sparseNode []byte
+type sparseElem []byte
-func (s sparseNode) Offset() []byte { return s[00:][:12] }
-func (s sparseNode) NumBytes() []byte { return s[12:][:12] }
+func (s sparseElem) Offset() []byte { return s[00:][:12] }
+func (s sparseElem) Length() []byte { return s[12:][:12] }
diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
index 9abe888218f..f4eeb557be9 100644
--- a/src/archive/tar/reader.go
+++ b/src/archive/tar/reader.go
@@ -4,33 +4,23 @@
package tar
-// TODO(dsymonds):
-// - pax extensions
-
import (
"bytes"
- "errors"
"io"
"io/ioutil"
- "math"
"strconv"
"strings"
"time"
)
-var (
- ErrHeader = errors.New("archive/tar: invalid tar header")
-)
-
-// A Reader provides sequential access to the contents of a tar archive.
-// A tar archive consists of a sequence of files.
-// The Next method advances to the next file in the archive (including the first),
-// and then it can be treated as an io.Reader to access the file's data.
+// Reader provides sequential access to the contents of a tar archive.
+// Reader.Next advances to the next file in the archive (including the first),
+// and then Reader can be treated as an io.Reader to access the file's data.
type Reader struct {
r io.Reader
- pad int64 // amount of padding (ignored) after current file entry
- curr numBytesReader // reader for current file entry
- blk block // buffer to use as temporary local storage
+ pad int64 // Amount of padding (ignored) after current file entry
+ curr fileReader // Reader for current file entry
+ blk block // Buffer to use as temporary local storage
// err is a persistent error.
// It is only the responsibility of every exported method of Reader to
@@ -38,68 +28,21 @@ type Reader struct {
err error
}
-// A numBytesReader is an io.Reader with a numBytes method, returning the number
-// of bytes remaining in the underlying encoded data.
-type numBytesReader interface {
+type fileReader interface {
io.Reader
- numBytes() int64
-}
+ fileState
-// A regFileReader is a numBytesReader for reading file data from a tar archive.
-type regFileReader struct {
- r io.Reader // underlying reader
- nb int64 // number of unread bytes for current file entry
+ WriteTo(io.Writer) (int64, error)
}
-// A sparseFileReader is a numBytesReader for reading sparse file data from a
-// tar archive.
-type sparseFileReader struct {
- rfr numBytesReader // Reads the sparse-encoded file data
- sp []sparseEntry // The sparse map for the file
- pos int64 // Keeps track of file position
- total int64 // Total size of the file
-}
-
-// A sparseEntry holds a single entry in a sparse file's sparse map.
-//
-// Sparse files are represented using a series of sparseEntrys.
-// Despite the name, a sparseEntry represents an actual data fragment that
-// references data found in the underlying archive stream. All regions not
-// covered by a sparseEntry are logically filled with zeros.
-//
-// For example, if the underlying raw file contains the 10-byte data:
-// var compactData = "abcdefgh"
-//
-// And the sparse map has the following entries:
-// var sp = []sparseEntry{
-// {offset: 2, numBytes: 5} // Data fragment for [2..7]
-// {offset: 18, numBytes: 3} // Data fragment for [18..21]
-// }
-//
-// Then the content of the resulting sparse file with a "real" size of 25 is:
-// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
-type sparseEntry struct {
- offset int64 // Starting position of the fragment
- numBytes int64 // Length of the fragment
-}
-
-// Keywords for GNU sparse files in a PAX extended header
-const (
- paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
- paxGNUSparseOffset = "GNU.sparse.offset"
- paxGNUSparseNumBytes = "GNU.sparse.numbytes"
- paxGNUSparseMap = "GNU.sparse.map"
- paxGNUSparseName = "GNU.sparse.name"
- paxGNUSparseMajor = "GNU.sparse.major"
- paxGNUSparseMinor = "GNU.sparse.minor"
- paxGNUSparseSize = "GNU.sparse.size"
- paxGNUSparseRealSize = "GNU.sparse.realsize"
-)
-
// NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+func NewReader(r io.Reader) *Reader {
+	return &Reader{r: r, curr: &regFileReader{r, 0}}
+}
// Next advances to the next entry in the tar archive.
+// The Header.Size determines how many bytes can be read for the next file.
+// Any remaining data in the current file is automatically discarded.
//
// io.EOF is returned at the end of the input.
func (tr *Reader) Next() (*Header, error) {
@@ -112,18 +55,26 @@ func (tr *Reader) Next() (*Header, error) {
}
func (tr *Reader) next() (*Header, error) {
- var extHdrs map[string]string
+ var paxHdrs map[string]string
+ var gnuLongName, gnuLongLink string
// Externally, Next iterates through the tar archive as if it is a series of
// files. Internally, the tar format often uses fake "files" to add meta
// data that describes the next file. These meta data "files" should not
// normally be visible to the outside. As such, this loop iterates through
// one or more "header files" until it finds a "normal file".
+ format := FormatUSTAR | FormatPAX | FormatGNU
loop:
for {
- if err := tr.skipUnread(); err != nil {
+ // Discard the remainder of the file and any padding.
+ if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil {
return nil, err
}
+ if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
+ return nil, err
+ }
+ tr.pad = 0
+
hdr, rawHdr, err := tr.readHeader()
if err != nil {
return nil, err
@@ -131,43 +82,58 @@ loop:
if err := tr.handleRegularFile(hdr); err != nil {
return nil, err
}
+ format.mayOnlyBe(hdr.Format)
// Check for PAX/GNU special headers and files.
switch hdr.Typeflag {
- case TypeXHeader:
- extHdrs, err = parsePAX(tr)
+ case TypeXHeader, TypeXGlobalHeader:
+ format.mayOnlyBe(FormatPAX)
+ paxHdrs, err = parsePAX(tr)
if err != nil {
return nil, err
}
+ if hdr.Typeflag == TypeXGlobalHeader {
+ mergePAX(hdr, paxHdrs)
+ return &Header{
+ Name: hdr.Name,
+ Typeflag: hdr.Typeflag,
+ Xattrs: hdr.Xattrs,
+ PAXRecords: hdr.PAXRecords,
+ Format: format,
+ }, nil
+ }
continue loop // This is a meta header affecting the next header
case TypeGNULongName, TypeGNULongLink:
+ format.mayOnlyBe(FormatGNU)
realname, err := ioutil.ReadAll(tr)
if err != nil {
return nil, err
}
- // Convert GNU extensions to use PAX headers.
- if extHdrs == nil {
- extHdrs = make(map[string]string)
- }
var p parser
switch hdr.Typeflag {
case TypeGNULongName:
- extHdrs[paxPath] = p.parseString(realname)
+ gnuLongName = p.parseString(realname)
case TypeGNULongLink:
- extHdrs[paxLinkpath] = p.parseString(realname)
- }
- if p.err != nil {
- return nil, p.err
+ gnuLongLink = p.parseString(realname)
}
continue loop // This is a meta header affecting the next header
default:
// The old GNU sparse format is handled here since it is technically
// just a regular file with additional attributes.
- if err := mergePAX(hdr, extHdrs); err != nil {
+ if err := mergePAX(hdr, paxHdrs); err != nil {
return nil, err
}
+ if gnuLongName != "" {
+ hdr.Name = gnuLongName
+ }
+ if gnuLongLink != "" {
+ hdr.Linkname = gnuLongLink
+ }
+ if hdr.Typeflag == TypeRegA && strings.HasSuffix(hdr.Name, "/") {
+ hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
+ }
// The extended headers may have updated the size.
// Thus, setup the regFileReader again after merging PAX headers.
@@ -177,9 +143,15 @@ loop:
// Sparse formats rely on being able to read from the logical data
// section; there must be a preceding call to handleRegularFile.
- if err := tr.handleSparseFile(hdr, rawHdr, extHdrs); err != nil {
+ if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
return nil, err
}
+
+ // Set the final guess at the format.
+ if format.has(FormatUSTAR) && format.has(FormatPAX) {
+ format.mayOnlyBe(FormatUSTAR)
+ }
+ hdr.Format = format
return hdr, nil // This is a file, so stop
}
}
@@ -197,105 +169,86 @@ func (tr *Reader) handleRegularFile(hdr *Header) error {
return ErrHeader
}
- tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
+ tr.pad = blockPadding(nb)
+	tr.curr = &regFileReader{r: tr.r, nb: nb}
return nil
}
// handleSparseFile checks if the current file is a sparse format of any type
// and sets the curr reader appropriately.
-func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block, extHdrs map[string]string) error {
- var sp []sparseEntry
+func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
+ var spd sparseDatas
var err error
if hdr.Typeflag == TypeGNUSparse {
- sp, err = tr.readOldGNUSparseMap(hdr, rawHdr)
- if err != nil {
- return err
- }
+ spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
} else {
- sp, err = tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
- if err != nil {
- return err
- }
+ spd, err = tr.readGNUSparsePAXHeaders(hdr)
}
// If sp is non-nil, then this is a sparse file.
- // Note that it is possible for len(sp) to be zero.
- if sp != nil {
- tr.curr, err = newSparseFileReader(tr.curr, sp, hdr.Size)
+ // Note that it is possible for len(sp) == 0.
+ if err == nil && spd != nil {
+ if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
+ return ErrHeader
+ }
+ sph := invertSparseEntries(spd, hdr.Size)
+ tr.curr = &sparseFileReader{tr.curr, sph, 0}
}
return err
}
-// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
-// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
-// be treated as a regular file.
-func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
- var sparseFormat string
-
- // Check for sparse format indicators
- major, majorOk := headers[paxGNUSparseMajor]
- minor, minorOk := headers[paxGNUSparseMinor]
- sparseName, sparseNameOk := headers[paxGNUSparseName]
- _, sparseMapOk := headers[paxGNUSparseMap]
- sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
- sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
-
- // Identify which, if any, sparse format applies from which PAX headers are set
- if majorOk && minorOk {
- sparseFormat = major + "." + minor
- } else if sparseNameOk && sparseMapOk {
- sparseFormat = "0.1"
- } else if sparseSizeOk {
- sparseFormat = "0.0"
- } else {
- // Not a PAX format GNU sparse file.
- return nil, nil
+// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
+// If they are found, then this function reads the sparse map and returns it.
+// This assumes that 0.0 headers have already been converted to 0.1 headers
+// by the PAX header parsing logic.
+func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
+ // Identify the version of GNU headers.
+ var is1x0 bool
+ major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
+ switch {
+ case major == "0" && (minor == "0" || minor == "1"):
+ is1x0 = false
+ case major == "1" && minor == "0":
+ is1x0 = true
+ case major != "" || minor != "":
+ return nil, nil // Unknown GNU sparse PAX version
+ case hdr.PAXRecords[paxGNUSparseMap] != "":
+ is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
+ default:
+ return nil, nil // Not a PAX format GNU sparse file.
}
+ hdr.Format.mayOnlyBe(FormatPAX)
- // Check for unknown sparse format
- if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
- return nil, nil
+ // Update hdr from GNU sparse PAX headers.
+ if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
+ hdr.Name = name
}
-
- // Update hdr from GNU sparse PAX headers
- if sparseNameOk {
- hdr.Name = sparseName
+ size := hdr.PAXRecords[paxGNUSparseSize]
+ if size == "" {
+ size = hdr.PAXRecords[paxGNUSparseRealSize]
}
- if sparseSizeOk {
- realSize, err := strconv.ParseInt(sparseSize, 10, 64)
+ if size != "" {
+ n, err := strconv.ParseInt(size, 10, 64)
if err != nil {
return nil, ErrHeader
}
- hdr.Size = realSize
- } else if sparseRealSizeOk {
- realSize, err := strconv.ParseInt(sparseRealSize, 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- hdr.Size = realSize
+ hdr.Size = n
}
- // Set up the sparse map, according to the particular sparse format in use
- var sp []sparseEntry
- var err error
- switch sparseFormat {
- case "0.0", "0.1":
- sp, err = readGNUSparseMap0x1(headers)
- case "1.0":
- sp, err = readGNUSparseMap1x0(tr.curr)
+ // Read the sparse map according to the appropriate format.
+ if is1x0 {
+ return readGNUSparseMap1x0(tr.curr)
}
- return sp, err
+ return readGNUSparseMap0x1(hdr.PAXRecords)
}
-// mergePAX merges well known headers according to PAX standard.
-// In general headers with the same name as those found
-// in the header struct overwrite those found in the header
-// struct with higher precision or longer values. Esp. useful
-// for name and linkname fields.
-func mergePAX(hdr *Header, headers map[string]string) (err error) {
- var id64 int64
- for k, v := range headers {
+// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
+func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
+ for k, v := range paxHdrs {
+ if v == "" {
+ continue // Keep the original USTAR value
+ }
+ var id64 int64
switch k {
case paxPath:
hdr.Name = v
@@ -320,17 +273,18 @@ func mergePAX(hdr *Header, headers map[string]string) (err error) {
case paxSize:
hdr.Size, err = strconv.ParseInt(v, 10, 64)
default:
- if strings.HasPrefix(k, paxXattr) {
+ if strings.HasPrefix(k, paxSchilyXattr) {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
}
- hdr.Xattrs[k[len(paxXattr):]] = v
+ hdr.Xattrs[k[len(paxSchilyXattr):]] = v
}
}
if err != nil {
return ErrHeader
}
}
+ hdr.PAXRecords = paxHdrs
return nil
}
@@ -348,7 +302,7 @@ func parsePAX(r io.Reader) (map[string]string, error) {
// headers since 0.0 headers were not PAX compliant.
var sparseMap []string
- extHdrs := make(map[string]string)
+ paxHdrs := make(map[string]string)
for len(sbuf) > 0 {
key, value, residual, err := parsePAXRecord(sbuf)
if err != nil {
@@ -366,58 +320,13 @@ func parsePAX(r io.Reader) (map[string]string, error) {
}
sparseMap = append(sparseMap, value)
default:
- // According to PAX specification, a value is stored only if it is
- // non-empty. Otherwise, the key is deleted.
- if len(value) > 0 {
- extHdrs[key] = value
- } else {
- delete(extHdrs, key)
- }
+ paxHdrs[key] = value
}
}
if len(sparseMap) > 0 {
- extHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
+ paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
}
- return extHdrs, nil
-}
-
-// skipUnread skips any unread bytes in the existing file entry, as well as any
-// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
-// encountered in the data portion; it is okay to hit io.EOF in the padding.
-//
-// Note that this function still works properly even when sparse files are being
-// used since numBytes returns the bytes remaining in the underlying io.Reader.
-func (tr *Reader) skipUnread() error {
- dataSkip := tr.numBytes() // Number of data bytes to skip
- totalSkip := dataSkip + tr.pad // Total number of bytes to skip
- tr.curr, tr.pad = nil, 0
-
- // If possible, Seek to the last byte before the end of the data section.
- // Do this because Seek is often lazy about reporting errors; this will mask
- // the fact that the tar stream may be truncated. We can rely on the
- // io.CopyN done shortly afterwards to trigger any IO errors.
- var seekSkipped int64 // Number of bytes skipped via Seek
- if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
- // Not all io.Seeker can actually Seek. For example, os.Stdin implements
- // io.Seeker, but calling Seek always returns an error and performs
- // no action. Thus, we try an innocent seek to the current position
- // to see if Seek is really supported.
- pos1, err := sr.Seek(0, io.SeekCurrent)
- if err == nil {
- // Seek seems supported, so perform the real Seek.
- pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent)
- if err != nil {
- return err
- }
- seekSkipped = pos2 - pos1
- }
- }
-
- copySkipped, err := io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
- if err == io.EOF && seekSkipped+copySkipped < dataSkip {
- err = io.ErrUnexpectedEOF
- }
- return err
+ return paxHdrs, nil
}
// readHeader reads the next block header and assumes that the underlying reader
@@ -445,7 +354,7 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
// Verify the header matches a known format.
format := tr.blk.GetFormat()
- if format == formatUnknown {
+ if format == FormatUnknown {
return nil, nil, ErrHeader
}
@@ -454,59 +363,86 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
// Unpack the V7 header.
v7 := tr.blk.V7()
+ hdr.Typeflag = v7.TypeFlag()[0]
hdr.Name = p.parseString(v7.Name())
+ hdr.Linkname = p.parseString(v7.LinkName())
+ hdr.Size = p.parseNumeric(v7.Size())
hdr.Mode = p.parseNumeric(v7.Mode())
hdr.Uid = int(p.parseNumeric(v7.UID()))
hdr.Gid = int(p.parseNumeric(v7.GID()))
- hdr.Size = p.parseNumeric(v7.Size())
hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
- hdr.Typeflag = v7.TypeFlag()[0]
- hdr.Linkname = p.parseString(v7.LinkName())
-
- // The atime and ctime fields are often left unused. Some versions of Go
- // had a bug in the tar.Writer where it would output an invalid tar file
- // in certain rare situations because the logic incorrectly believed that
- // the old GNU format had a prefix field. This is wrong and leads to
- // an outputted file that actually mangles the atime and ctime fields.
- //
- // In order to continue reading tar files created by a buggy writer, we
- // try to parse the atime and ctime fields, but just return the zero value
- // of time.Time when we cannot parse them.
- //
- // See https://golang.org/issues/12594
- tryParseTime := func(b []byte) time.Time {
- var p parser
- n := p.parseNumeric(b)
- if b[0] != 0x00 && p.err == nil {
- return time.Unix(n, 0)
- }
- return time.Time{}
- }
// Unpack format specific fields.
if format > formatV7 {
ustar := tr.blk.USTAR()
hdr.Uname = p.parseString(ustar.UserName())
hdr.Gname = p.parseString(ustar.GroupName())
- if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
- hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
- hdr.Devminor = p.parseNumeric(ustar.DevMinor())
- }
+ hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
+ hdr.Devminor = p.parseNumeric(ustar.DevMinor())
var prefix string
- switch format {
- case formatUSTAR:
+ switch {
+ case format.has(FormatUSTAR | FormatPAX):
+ hdr.Format = format
ustar := tr.blk.USTAR()
prefix = p.parseString(ustar.Prefix())
- case formatSTAR:
+
+ // For Format detection, check if block is properly formatted since
+ // the parser is more liberal than what USTAR actually permits.
+ notASCII := func(r rune) bool { return r >= 0x80 }
+ if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
+ hdr.Format = FormatUnknown // Non-ASCII characters in block.
+ }
+ nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
+ if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
+ nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
+ hdr.Format = FormatUnknown // Numeric fields must end in NUL
+ }
+ case format.has(formatSTAR):
star := tr.blk.STAR()
prefix = p.parseString(star.Prefix())
hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
- case formatGNU:
+ case format.has(FormatGNU):
+ hdr.Format = format
+ var p2 parser
gnu := tr.blk.GNU()
- hdr.AccessTime = tryParseTime(gnu.AccessTime())
- hdr.ChangeTime = tryParseTime(gnu.ChangeTime())
+ if b := gnu.AccessTime(); b[0] != 0 {
+ hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
+ }
+ if b := gnu.ChangeTime(); b[0] != 0 {
+ hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
+ }
+
+ // Prior to Go1.8, the Writer had a bug where it would output
+ // an invalid tar file in certain rare situations because the logic
+ // incorrectly believed that the old GNU format had a prefix field.
+ // This is wrong and leads to an output file that mangles the
+ // atime and ctime fields, which are often left unused.
+ //
+ // In order to continue reading tar files created by former, buggy
+ // versions of Go, we skeptically parse the atime and ctime fields.
+ // If we are unable to parse them and the prefix field looks like
+ // an ASCII string, then we fallback on the pre-Go1.8 behavior
+ // of treating these fields as the USTAR prefix field.
+ //
+ // Note that this will not use the fallback logic for all possible
+ // files generated by a pre-Go1.8 toolchain. If the generated file
+ // happened to have a prefix field that parses as valid
+ // atime and ctime fields (e.g., when they are valid octal strings),
+	// then it is impossible to distinguish between a valid GNU file
+ // and an invalid pre-Go1.8 file.
+ //
+ // See https://golang.org/issues/12594
+ // See https://golang.org/issues/21005
+ if p2.err != nil {
+ hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
+ ustar := tr.blk.USTAR()
+ if s := p.parseString(ustar.Prefix()); isASCII(s) {
+ prefix = s
+ }
+ hdr.Format = FormatUnknown // Buggy file is not GNU
+ }
}
if len(prefix) > 0 {
hdr.Name = prefix + "/" + hdr.Name
@@ -523,21 +459,22 @@ func (tr *Reader) readHeader() (*Header, *block, error) {
// The Header.Size does not reflect the size of any extended headers used.
// Thus, this function will read from the raw io.Reader to fetch extra headers.
// This method mutates blk in the process.
-func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, error) {
+func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
// Make sure that the input format is GNU.
// Unfortunately, the STAR format also has a sparse header format that uses
// the same type flag but has a completely different layout.
- if blk.GetFormat() != formatGNU {
+ if blk.GetFormat() != FormatGNU {
return nil, ErrHeader
}
+ hdr.Format.mayOnlyBe(FormatGNU)
var p parser
hdr.Size = p.parseNumeric(blk.GNU().RealSize())
if p.err != nil {
return nil, p.err
}
- var s sparseArray = blk.GNU().Sparse()
- var sp = make([]sparseEntry, 0, s.MaxEntries())
+ s := blk.GNU().Sparse()
+ spd := make(sparseDatas, 0, s.MaxEntries())
for {
for i := 0; i < s.MaxEntries(); i++ {
// This termination condition is identical to GNU and BSD tar.
@@ -545,25 +482,22 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, e
break // Don't return, need to process extended headers (even if empty)
}
offset := p.parseNumeric(s.Entry(i).Offset())
- numBytes := p.parseNumeric(s.Entry(i).NumBytes())
+ length := p.parseNumeric(s.Entry(i).Length())
if p.err != nil {
return nil, p.err
}
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ spd = append(spd, sparseEntry{Offset: offset, Length: length})
}
if s.IsExtended()[0] > 0 {
// There are more entries. Read an extension header and parse its entries.
- if _, err := io.ReadFull(tr.r, blk[:]); err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
+ if _, err := mustReadFull(tr.r, blk[:]); err != nil {
return nil, err
}
s = blk.Sparse()
continue
}
- return sp, nil // Done
+ return spd, nil // Done
}
}
@@ -571,28 +505,27 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, e
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
-// fields (offset, numBytes). This function must stop reading at the end
+// fields (offset, length). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
-func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
- var cntNewline int64
- var buf bytes.Buffer
- var blk = make([]byte, blockSize)
+func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
+ var (
+ cntNewline int64
+ buf bytes.Buffer
+ blk block
+ )
- // feedTokens copies data in numBlock chunks from r into buf until there are
+ // feedTokens copies data in blocks from r into buf until there are
// at least cnt newlines in buf. It will not read more blocks than needed.
- var feedTokens = func(cnt int64) error {
- for cntNewline < cnt {
- if _, err := io.ReadFull(r, blk); err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
+ feedTokens := func(n int64) error {
+ for cntNewline < n {
+ if _, err := mustReadFull(r, blk[:]); err != nil {
return err
}
- buf.Write(blk)
+ buf.Write(blk[:])
for _, c := range blk {
if c == '\n' {
cntNewline++
@@ -604,10 +537,10 @@ func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
// nextToken gets the next token delimited by a newline. This assumes that
// at least one newline exists in the buffer.
- var nextToken = func() string {
+ nextToken := func() string {
cntNewline--
tok, _ := buf.ReadString('\n')
- return tok[:len(tok)-1] // Cut off newline
+ return strings.TrimRight(tok, "\n")
}
// Parse for the number of entries.
@@ -626,80 +559,67 @@ func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
if err := feedTokens(2 * numEntries); err != nil {
return nil, err
}
- sp := make([]sparseEntry, 0, numEntries)
+ spd := make(sparseDatas, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
- offset, err := strconv.ParseInt(nextToken(), 10, 64)
- if err != nil {
+ offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
+ length, err2 := strconv.ParseInt(nextToken(), 10, 64)
+ if err1 != nil || err2 != nil {
return nil, ErrHeader
}
- numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ spd = append(spd, sparseEntry{Offset: offset, Length: length})
}
- return sp, nil
+ return spd, nil
}
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
// version 0.1. The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
+func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
// Get number of entries.
// Use integer overflow resistant math to check this.
- numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
+ numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
return nil, ErrHeader
}
// There should be two numbers in sparseMap for each entry.
- sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
+ sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
+ if len(sparseMap) == 1 && sparseMap[0] == "" {
+ sparseMap = sparseMap[:0]
+ }
if int64(len(sparseMap)) != 2*numEntries {
return nil, ErrHeader
}
// Loop through the entries in the sparse map.
// numEntries is trusted now.
- sp := make([]sparseEntry, 0, numEntries)
- for i := int64(0); i < numEntries; i++ {
- offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
- if err != nil {
+ spd := make(sparseDatas, 0, numEntries)
+ for len(sparseMap) >= 2 {
+ offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
+ length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
+ if err1 != nil || err2 != nil {
return nil, ErrHeader
}
- numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ spd = append(spd, sparseEntry{Offset: offset, Length: length})
+ sparseMap = sparseMap[2:]
}
- return sp, nil
+ return spd, nil
}
-// numBytes returns the number of bytes left to read in the current file's entry
-// in the tar archive, or 0 if there is no current file.
-func (tr *Reader) numBytes() int64 {
- if tr.curr == nil {
- // No current file, so no bytes
- return 0
- }
- return tr.curr.numBytes()
-}
-
-// Read reads from the current entry in the tar archive.
-// It returns 0, io.EOF when it reaches the end of that entry,
-// until Next is called to advance to the next entry.
+// Read reads from the current file in the tar archive.
+// It returns (0, io.EOF) when it reaches the end of that file,
+// until Next is called to advance to the next file.
//
-// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
+// If the current file is sparse, then the regions marked as a hole
+// are read back as NUL-bytes.
+//
+// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
// the Header.Size claims.
func (tr *Reader) Read(b []byte) (int, error) {
if tr.err != nil {
return 0, tr.err
}
- if tr.curr == nil {
- return 0, io.EOF
- }
-
n, err := tr.curr.Read(b)
if err != nil && err != io.EOF {
tr.err = err
@@ -707,116 +627,229 @@ func (tr *Reader) Read(b []byte) (int, error) {
return n, err
}
-func (rfr *regFileReader) Read(b []byte) (n int, err error) {
- if rfr.nb == 0 {
- // file consumed
- return 0, io.EOF
+// writeTo writes the content of the current file to w.
+// The number of bytes written matches the number of remaining bytes in the current file.
+//
+// If the current file is sparse and w is an io.WriteSeeker,
+// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
+// assuming that skipped regions are filled with NULs.
+// This always writes the last byte to ensure w is the right size.
+//
+// TODO(dsnet): Re-export this when adding sparse file support.
+// See https://golang.org/issue/22735
+func (tr *Reader) writeTo(w io.Writer) (int64, error) {
+ if tr.err != nil {
+ return 0, tr.err
}
- if int64(len(b)) > rfr.nb {
- b = b[0:rfr.nb]
- }
- n, err = rfr.r.Read(b)
- rfr.nb -= int64(n)
-
- if err == io.EOF && rfr.nb > 0 {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// numBytes returns the number of bytes left to read in the file's data in the tar archive.
-func (rfr *regFileReader) numBytes() int64 {
- return rfr.nb
-}
-
-// newSparseFileReader creates a new sparseFileReader, but validates all of the
-// sparse entries before doing so.
-func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
- if total < 0 {
- return nil, ErrHeader // Total size cannot be negative
- }
-
- // Validate all sparse entries. These are the same checks as performed by
- // the BSD tar utility.
- for i, s := range sp {
- switch {
- case s.offset < 0 || s.numBytes < 0:
- return nil, ErrHeader // Negative values are never okay
- case s.offset > math.MaxInt64-s.numBytes:
- return nil, ErrHeader // Integer overflow with large length
- case s.offset+s.numBytes > total:
- return nil, ErrHeader // Region extends beyond the "real" size
- case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
- return nil, ErrHeader // Regions can't overlap and must be in order
- }
- }
- return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
-}
-
-// readHole reads a sparse hole ending at endOffset.
-func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
- n64 := endOffset - sfr.pos
- if n64 > int64(len(b)) {
- n64 = int64(len(b))
- }
- n := int(n64)
- for i := 0; i < n; i++ {
- b[i] = 0
- }
- sfr.pos += n64
- return n
-}
-
-// Read reads the sparse file data in expanded form.
-func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
- // Skip past all empty fragments.
- for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
- sfr.sp = sfr.sp[1:]
- }
-
- // If there are no more fragments, then it is possible that there
- // is one last sparse hole.
- if len(sfr.sp) == 0 {
- // This behavior matches the BSD tar utility.
- // However, GNU tar stops returning data even if sfr.total is unmet.
- if sfr.pos < sfr.total {
- return sfr.readHole(b, sfr.total), nil
- }
- return 0, io.EOF
- }
-
- // In front of a data fragment, so read a hole.
- if sfr.pos < sfr.sp[0].offset {
- return sfr.readHole(b, sfr.sp[0].offset), nil
- }
-
- // In a data fragment, so read from it.
- // This math is overflow free since we verify that offset and numBytes can
- // be safely added when creating the sparseFileReader.
- endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
- bytesLeft := endPos - sfr.pos // Bytes left in fragment
- if int64(len(b)) > bytesLeft {
- b = b[:bytesLeft]
- }
-
- n, err = sfr.rfr.Read(b)
- sfr.pos += int64(n)
- if err == io.EOF {
- if sfr.pos < endPos {
- err = io.ErrUnexpectedEOF // There was supposed to be more data
- } else if sfr.pos < sfr.total {
- err = nil // There is still an implicit sparse hole at the end
- }
- }
-
- if sfr.pos == endPos {
- sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
+ n, err := tr.curr.WriteTo(w)
+ if err != nil {
+ tr.err = err
}
return n, err
}
-// numBytes returns the number of bytes left to read in the sparse file's
-// sparse-encoded data in the tar archive.
-func (sfr *sparseFileReader) numBytes() int64 {
- return sfr.rfr.numBytes()
+// regFileReader is a fileReader for reading data from a regular file entry.
+type regFileReader struct {
+ r io.Reader // Underlying Reader
+ nb int64 // Number of remaining bytes to read
+}
+
+func (fr *regFileReader) Read(b []byte) (n int, err error) {
+ if int64(len(b)) > fr.nb {
+ b = b[:fr.nb]
+ }
+ if len(b) > 0 {
+ n, err = fr.r.Read(b)
+ fr.nb -= int64(n)
+ }
+ switch {
+ case err == io.EOF && fr.nb > 0:
+ return n, io.ErrUnexpectedEOF
+ case err == nil && fr.nb == 0:
+ return n, io.EOF
+ default:
+ return n, err
+ }
+}
+
+func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
+ return io.Copy(w, struct{ io.Reader }{fr})
+}
+
+func (fr regFileReader) LogicalRemaining() int64 {
+ return fr.nb
+}
+
+func (fr regFileReader) PhysicalRemaining() int64 {
+ return fr.nb
+}
+
+// sparseFileReader is a fileReader for reading data from a sparse file entry.
+type sparseFileReader struct {
+ fr fileReader // Underlying fileReader
+ sp sparseHoles // Normalized list of sparse holes
+ pos int64 // Current position in sparse file
+}
+
+func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
+ finished := int64(len(b)) >= sr.LogicalRemaining()
+ if finished {
+ b = b[:sr.LogicalRemaining()]
+ }
+
+ b0 := b
+ endPos := sr.pos + int64(len(b))
+ for endPos > sr.pos && err == nil {
+ var nf int // Bytes read in fragment
+ holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
+ if sr.pos < holeStart { // In a data fragment
+ bf := b[:min(int64(len(b)), holeStart-sr.pos)]
+ nf, err = tryReadFull(sr.fr, bf)
+ } else { // In a hole fragment
+ bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
+ nf, err = tryReadFull(zeroReader{}, bf)
+ }
+ b = b[nf:]
+ sr.pos += int64(nf)
+ if sr.pos >= holeEnd && len(sr.sp) > 1 {
+ sr.sp = sr.sp[1:] // Ensure last fragment always remains
+ }
+ }
+
+ n = len(b0) - len(b)
+ switch {
+ case err == io.EOF:
+ return n, errMissData // Less data in dense file than sparse file
+ case err != nil:
+ return n, err
+ case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
+ return n, errUnrefData // More data in dense file than sparse file
+ case finished:
+ return n, io.EOF
+ default:
+ return n, nil
+ }
+}
+
+func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
+ ws, ok := w.(io.WriteSeeker)
+ if ok {
+ if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
+ ok = false // Not all io.Seeker can really seek
+ }
+ }
+ if !ok {
+ return io.Copy(w, struct{ io.Reader }{sr})
+ }
+
+ var writeLastByte bool
+ pos0 := sr.pos
+ for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
+ var nf int64 // Size of fragment
+ holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
+ if sr.pos < holeStart { // In a data fragment
+ nf = holeStart - sr.pos
+ nf, err = io.CopyN(ws, sr.fr, nf)
+ } else { // In a hole fragment
+ nf = holeEnd - sr.pos
+ if sr.PhysicalRemaining() == 0 {
+ writeLastByte = true
+ nf--
+ }
+ _, err = ws.Seek(nf, io.SeekCurrent)
+ }
+ sr.pos += nf
+ if sr.pos >= holeEnd && len(sr.sp) > 1 {
+ sr.sp = sr.sp[1:] // Ensure last fragment always remains
+ }
+ }
+
+ // If the last fragment is a hole, then seek to 1-byte before EOF, and
+ // write a single byte to ensure the file is the right size.
+ if writeLastByte && err == nil {
+ _, err = ws.Write([]byte{0})
+ sr.pos++
+ }
+
+ n = sr.pos - pos0
+ switch {
+ case err == io.EOF:
+ return n, errMissData // Less data in dense file than sparse file
+ case err != nil:
+ return n, err
+ case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
+ return n, errUnrefData // More data in dense file than sparse file
+ default:
+ return n, nil
+ }
+}
+
+func (sr sparseFileReader) LogicalRemaining() int64 {
+ return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
+}
+func (sr sparseFileReader) PhysicalRemaining() int64 {
+ return sr.fr.PhysicalRemaining()
+}
+
+type zeroReader struct{}
+
+func (zeroReader) Read(b []byte) (int, error) {
+ for i := range b {
+ b[i] = 0
+ }
+ return len(b), nil
+}
+
+// mustReadFull is like io.ReadFull except it returns
+// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
+func mustReadFull(r io.Reader, b []byte) (int, error) {
+ n, err := tryReadFull(r, b)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return n, err
+}
+
+// tryReadFull is like io.ReadFull except it returns
+// io.EOF when it is hit before len(b) bytes are read.
+func tryReadFull(r io.Reader, b []byte) (n int, err error) {
+ for len(b) > n && err == nil {
+ var nn int
+ nn, err = r.Read(b[n:])
+ n += nn
+ }
+ if len(b) == n && err == io.EOF {
+ err = nil
+ }
+ return n, err
+}
+
+// discard skips n bytes in r, reporting an error if unable to do so.
+func discard(r io.Reader, n int64) error {
+ // If possible, Seek to the last byte before the end of the data section.
+ // Do this because Seek is often lazy about reporting errors; this will mask
+ // the fact that the stream may be truncated. We can rely on the
+ // io.CopyN done shortly afterwards to trigger any IO errors.
+ var seekSkipped int64 // Number of bytes skipped via Seek
+ if sr, ok := r.(io.Seeker); ok && n > 1 {
+ // Not all io.Seeker can actually Seek. For example, os.Stdin implements
+ // io.Seeker, but calling Seek always returns an error and performs
+ // no action. Thus, we try an innocent seek to the current position
+ // to see if Seek is really supported.
+ pos1, err := sr.Seek(0, io.SeekCurrent)
+ if pos1 >= 0 && err == nil {
+ // Seek seems supported, so perform the real Seek.
+ pos2, err := sr.Seek(n-1, io.SeekCurrent)
+ if pos2 < 0 || err != nil {
+ return err
+ }
+ seekSkipped = pos2 - pos1
+ }
+ }
+
+ copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped)
+ if err == io.EOF && seekSkipped+copySkipped < n {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
}
diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go
index 338686836b6..a6832d33b1b 100644
--- a/src/archive/tar/reader_test.go
+++ b/src/archive/tar/reader_test.go
@@ -7,12 +7,15 @@ package tar
import (
"bytes"
"crypto/md5"
+ "errors"
"fmt"
"io"
"io/ioutil"
"math"
"os"
+ "path"
"reflect"
+ "strconv"
"strings"
"testing"
"time"
@@ -36,6 +39,7 @@ func TestReader(t *testing.T) {
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
+ Format: FormatGNU,
}, {
Name: "small2.txt",
Mode: 0640,
@@ -46,6 +50,7 @@ func TestReader(t *testing.T) {
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
+ Format: FormatGNU,
}},
chksums: []string{
"e38b27eaccb4391bdec553a7f3ae6b2f",
@@ -66,6 +71,7 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
+ Format: FormatGNU,
}, {
Name: "sparse-posix-0.0",
Mode: 420,
@@ -79,6 +85,12 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
+ PAXRecords: map[string]string{
+ "GNU.sparse.size": "200",
+ "GNU.sparse.numblocks": "95",
+ "GNU.sparse.map": "1,1,3,1,5,1,7,1,9,1,11,1,13,1,15,1,17,1,19,1,21,1,23,1,25,1,27,1,29,1,31,1,33,1,35,1,37,1,39,1,41,1,43,1,45,1,47,1,49,1,51,1,53,1,55,1,57,1,59,1,61,1,63,1,65,1,67,1,69,1,71,1,73,1,75,1,77,1,79,1,81,1,83,1,85,1,87,1,89,1,91,1,93,1,95,1,97,1,99,1,101,1,103,1,105,1,107,1,109,1,111,1,113,1,115,1,117,1,119,1,121,1,123,1,125,1,127,1,129,1,131,1,133,1,135,1,137,1,139,1,141,1,143,1,145,1,147,1,149,1,151,1,153,1,155,1,157,1,159,1,161,1,163,1,165,1,167,1,169,1,171,1,173,1,175,1,177,1,179,1,181,1,183,1,185,1,187,1,189,1",
+ },
+ Format: FormatPAX,
}, {
Name: "sparse-posix-0.1",
Mode: 420,
@@ -92,6 +104,13 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
+ PAXRecords: map[string]string{
+ "GNU.sparse.size": "200",
+ "GNU.sparse.numblocks": "95",
+ "GNU.sparse.map": "1,1,3,1,5,1,7,1,9,1,11,1,13,1,15,1,17,1,19,1,21,1,23,1,25,1,27,1,29,1,31,1,33,1,35,1,37,1,39,1,41,1,43,1,45,1,47,1,49,1,51,1,53,1,55,1,57,1,59,1,61,1,63,1,65,1,67,1,69,1,71,1,73,1,75,1,77,1,79,1,81,1,83,1,85,1,87,1,89,1,91,1,93,1,95,1,97,1,99,1,101,1,103,1,105,1,107,1,109,1,111,1,113,1,115,1,117,1,119,1,121,1,123,1,125,1,127,1,129,1,131,1,133,1,135,1,137,1,139,1,141,1,143,1,145,1,147,1,149,1,151,1,153,1,155,1,157,1,159,1,161,1,163,1,165,1,167,1,169,1,171,1,173,1,175,1,177,1,179,1,181,1,183,1,185,1,187,1,189,1",
+ "GNU.sparse.name": "sparse-posix-0.1",
+ },
+ Format: FormatPAX,
}, {
Name: "sparse-posix-1.0",
Mode: 420,
@@ -105,6 +124,13 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
+ PAXRecords: map[string]string{
+ "GNU.sparse.major": "1",
+ "GNU.sparse.minor": "0",
+ "GNU.sparse.realsize": "200",
+ "GNU.sparse.name": "sparse-posix-1.0",
+ },
+ Format: FormatPAX,
}, {
Name: "end",
Mode: 420,
@@ -118,6 +144,7 @@ func TestReader(t *testing.T) {
Gname: "david",
Devmajor: 0,
Devminor: 0,
+ Format: FormatGNU,
}},
chksums: []string{
"6f53234398c2449fe67c1812d993012f",
@@ -186,6 +213,13 @@ func TestReader(t *testing.T) {
ChangeTime: time.Unix(1350244992, 23960108),
AccessTime: time.Unix(1350244992, 23960108),
Typeflag: TypeReg,
+ PAXRecords: map[string]string{
+ "path": "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ "mtime": "1350244992.023960108",
+ "atime": "1350244992.023960108",
+ "ctime": "1350244992.023960108",
+ },
+ Format: FormatPAX,
}, {
Name: "a/b",
Mode: 0777,
@@ -199,6 +233,13 @@ func TestReader(t *testing.T) {
AccessTime: time.Unix(1350266320, 910238425),
Typeflag: TypeSymlink,
Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ PAXRecords: map[string]string{
+ "linkpath": "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ "mtime": "1350266320.910238425",
+ "atime": "1350266320.910238425",
+ "ctime": "1350266320.910238425",
+ },
+ Format: FormatPAX,
}},
}, {
file: "testdata/pax-bad-hdr-file.tar",
@@ -218,10 +259,63 @@ func TestReader(t *testing.T) {
Typeflag: '0',
Uname: "joetsai",
Gname: "eng",
+ PAXRecords: map[string]string{
+ "size": "000000000000000000000999",
+ },
+ Format: FormatPAX,
}},
chksums: []string{
"0afb597b283fe61b5d4879669a350556",
},
+ }, {
+ file: "testdata/pax-records.tar",
+ headers: []*Header{{
+ Typeflag: TypeReg,
+ Name: "file",
+ Uname: strings.Repeat("long", 10),
+ ModTime: time.Unix(0, 0),
+ PAXRecords: map[string]string{
+ "GOLANG.pkg": "tar",
+ "comment": "Hello, 世界",
+ "uname": strings.Repeat("long", 10),
+ },
+ Format: FormatPAX,
+ }},
+ }, {
+ file: "testdata/pax-global-records.tar",
+ headers: []*Header{{
+ Typeflag: TypeXGlobalHeader,
+ Name: "global1",
+ PAXRecords: map[string]string{"path": "global1", "mtime": "1500000000.0"},
+ Format: FormatPAX,
+ }, {
+ Typeflag: TypeReg,
+ Name: "file1",
+ ModTime: time.Unix(0, 0),
+ Format: FormatUSTAR,
+ }, {
+ Typeflag: TypeReg,
+ Name: "file2",
+ PAXRecords: map[string]string{"path": "file2"},
+ ModTime: time.Unix(0, 0),
+ Format: FormatPAX,
+ }, {
+ Typeflag: TypeXGlobalHeader,
+ Name: "GlobalHead.0.0",
+ PAXRecords: map[string]string{"path": ""},
+ Format: FormatPAX,
+ }, {
+ Typeflag: TypeReg,
+ Name: "file3",
+ ModTime: time.Unix(0, 0),
+ Format: FormatUSTAR,
+ }, {
+ Typeflag: TypeReg,
+ Name: "file4",
+ ModTime: time.Unix(1400000000, 0),
+ PAXRecords: map[string]string{"mtime": "1400000000"},
+ Format: FormatPAX,
+ }},
}, {
file: "testdata/nil-uid.tar", // golang.org/issue/5290
headers: []*Header{{
@@ -237,6 +331,7 @@ func TestReader(t *testing.T) {
Gname: "eyefi",
Devmajor: 0,
Devminor: 0,
+ Format: FormatGNU,
}},
}, {
file: "testdata/xattrs.tar",
@@ -258,6 +353,15 @@ func TestReader(t *testing.T) {
// Interestingly, selinux encodes the terminating null inside the xattr
"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
},
+ PAXRecords: map[string]string{
+ "mtime": "1386065770.44825232",
+ "atime": "1389782991.41987522",
+ "ctime": "1389782956.794414986",
+ "SCHILY.xattr.user.key": "value",
+ "SCHILY.xattr.user.key2": "value2",
+ "SCHILY.xattr.security.selinux": "unconfined_u:object_r:default_t:s0\x00",
+ },
+ Format: FormatPAX,
}, {
Name: "small2.txt",
Mode: 0644,
@@ -273,6 +377,13 @@ func TestReader(t *testing.T) {
Xattrs: map[string]string{
"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
},
+ PAXRecords: map[string]string{
+ "mtime": "1386065770.449252304",
+ "atime": "1389782991.41987522",
+ "ctime": "1386065770.449252304",
+ "SCHILY.xattr.security.selinux": "unconfined_u:object_r:default_t:s0\x00",
+ },
+ Format: FormatPAX,
}},
}, {
// Matches the behavior of GNU, BSD, and STAR tar utilities.
@@ -282,6 +393,7 @@ func TestReader(t *testing.T) {
Linkname: "GNU4/GNU4/long-linkpath-name",
ModTime: time.Unix(0, 0),
Typeflag: '2',
+ Format: FormatGNU,
}},
}, {
// GNU tar file with atime and ctime fields set.
@@ -300,6 +412,7 @@ func TestReader(t *testing.T) {
Gname: "dsnet",
AccessTime: time.Unix(1441974501, 0),
ChangeTime: time.Unix(1441973436, 0),
+ Format: FormatGNU,
}, {
Name: "test2/foo",
Mode: 33188,
@@ -312,6 +425,7 @@ func TestReader(t *testing.T) {
Gname: "dsnet",
AccessTime: time.Unix(1441974501, 0),
ChangeTime: time.Unix(1441973436, 0),
+ Format: FormatGNU,
}, {
Name: "test2/sparse",
Mode: 33188,
@@ -324,6 +438,7 @@ func TestReader(t *testing.T) {
Gname: "dsnet",
AccessTime: time.Unix(1441991948, 0),
ChangeTime: time.Unix(1441973436, 0),
+ Format: FormatGNU,
}},
}, {
// Matches the behavior of GNU and BSD tar utilities.
@@ -333,7 +448,75 @@ func TestReader(t *testing.T) {
Linkname: "PAX4/PAX4/long-linkpath-name",
ModTime: time.Unix(0, 0),
Typeflag: '2',
+ PAXRecords: map[string]string{
+ "linkpath": "PAX4/PAX4/long-linkpath-name",
+ },
+ Format: FormatPAX,
}},
+ }, {
+ // Both BSD and GNU tar truncate long names at first NUL even
+ // if there is data following that NUL character.
+ // This is reasonable as GNU long names are C-strings.
+ file: "testdata/gnu-long-nul.tar",
+ headers: []*Header{{
+ Name: "0123456789",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 1000,
+ ModTime: time.Unix(1486082191, 0),
+ Typeflag: '0',
+ Uname: "rawr",
+ Gname: "dsnet",
+ Format: FormatGNU,
+ }},
+ }, {
+ // This archive was generated by Writer but is readable by both
+ // GNU and BSD tar utilities.
+ // The archive generated by GNU is nearly byte-for-byte identical
+ // to the Go version except the Go version sets a negative Devminor
+ // just to force the GNU format.
+ file: "testdata/gnu-utf8.tar",
+ headers: []*Header{{
+ Name: "☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹",
+ Mode: 0644,
+ Uid: 1000, Gid: 1000,
+ ModTime: time.Unix(0, 0),
+ Typeflag: '0',
+ Uname: "☺",
+ Gname: "⚹",
+ Format: FormatGNU,
+ }},
+ }, {
+ // This archive was generated by Writer but is readable by both
+ // GNU and BSD tar utilities.
+ // The archive generated by GNU is nearly byte-for-byte identical
+ // to the Go version except the Go version sets a negative Devminor
+ // just to force the GNU format.
+ file: "testdata/gnu-not-utf8.tar",
+ headers: []*Header{{
+ Name: "hi\x80\x81\x82\x83bye",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 1000,
+ ModTime: time.Unix(0, 0),
+ Typeflag: '0',
+ Uname: "rawr",
+ Gname: "dsnet",
+ Format: FormatGNU,
+ }},
+ }, {
+ // BSD tar v3.1.2 and GNU tar v1.27.1 both reject PAX records
+ // with NULs in the key.
+ file: "testdata/pax-nul-xattrs.tar",
+ err: ErrHeader,
+ }, {
+ // BSD tar v3.1.2 rejects a PAX path with NUL in the value, while
+ // GNU tar v1.27.1 simply truncates at first NUL.
+ // We emulate the behavior of BSD since performing NUL truncation
+ // is strange, given that PAX records are length-prefixed strings
+ // rather than NUL-terminated C-strings.
+ file: "testdata/pax-nul-path.tar",
+ err: ErrHeader,
}, {
file: "testdata/neg-size.tar",
err: ErrHeader,
@@ -346,483 +529,214 @@ func TestReader(t *testing.T) {
}, {
file: "testdata/issue12435.tar",
err: ErrHeader,
+ }, {
+ // Ensure that we can read back the original Header as written with
+ // a buggy pre-Go1.8 tar.Writer.
+ file: "testdata/invalid-go17.tar",
+ headers: []*Header{{
+ Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/foo",
+ Uid: 010000000,
+ ModTime: time.Unix(0, 0),
+ }},
+ }, {
+ // USTAR archive with a regular entry with non-zero device numbers.
+ file: "testdata/ustar-file-devs.tar",
+ headers: []*Header{{
+ Name: "file",
+ Mode: 0644,
+ Typeflag: '0',
+ ModTime: time.Unix(0, 0),
+ Devmajor: 1,
+ Devminor: 1,
+ Format: FormatUSTAR,
+ }},
+ }, {
+ // Generated by Go, works on BSD tar v3.1.2 and GNU tar v1.27.1.
+ file: "testdata/gnu-nil-sparse-data.tar",
+ headers: []*Header{{
+ Name: "sparse.db",
+ Typeflag: TypeGNUSparse,
+ Size: 1000,
+ ModTime: time.Unix(0, 0),
+ Format: FormatGNU,
+ }},
+ }, {
+ // Generated by Go, works on BSD tar v3.1.2 and GNU tar v1.27.1.
+ file: "testdata/gnu-nil-sparse-hole.tar",
+ headers: []*Header{{
+ Name: "sparse.db",
+ Typeflag: TypeGNUSparse,
+ Size: 1000,
+ ModTime: time.Unix(0, 0),
+ Format: FormatGNU,
+ }},
+ }, {
+ // Generated by Go, works on BSD tar v3.1.2 and GNU tar v1.27.1.
+ file: "testdata/pax-nil-sparse-data.tar",
+ headers: []*Header{{
+ Name: "sparse.db",
+ Typeflag: TypeReg,
+ Size: 1000,
+ ModTime: time.Unix(0, 0),
+ PAXRecords: map[string]string{
+ "size": "1512",
+ "GNU.sparse.major": "1",
+ "GNU.sparse.minor": "0",
+ "GNU.sparse.realsize": "1000",
+ "GNU.sparse.name": "sparse.db",
+ },
+ Format: FormatPAX,
+ }},
+ }, {
+ // Generated by Go, works on BSD tar v3.1.2 and GNU tar v1.27.1.
+ file: "testdata/pax-nil-sparse-hole.tar",
+ headers: []*Header{{
+ Name: "sparse.db",
+ Typeflag: TypeReg,
+ Size: 1000,
+ ModTime: time.Unix(0, 0),
+ PAXRecords: map[string]string{
+ "size": "512",
+ "GNU.sparse.major": "1",
+ "GNU.sparse.minor": "0",
+ "GNU.sparse.realsize": "1000",
+ "GNU.sparse.name": "sparse.db",
+ },
+ Format: FormatPAX,
+ }},
+ }, {
+ file: "testdata/trailing-slash.tar",
+ headers: []*Header{{
+ Typeflag: TypeDir,
+ Name: strings.Repeat("123456789/", 30),
+ ModTime: time.Unix(0, 0),
+ PAXRecords: map[string]string{
+ "path": strings.Repeat("123456789/", 30),
+ },
+ Format: FormatPAX,
+ }},
}}
- for i, v := range vectors {
- f, err := os.Open(v.file)
- if err != nil {
- t.Errorf("file %s, test %d: unexpected error: %v", v.file, i, err)
- continue
- }
- defer f.Close()
-
- // Capture all headers and checksums.
- var (
- tr = NewReader(f)
- hdrs []*Header
- chksums []string
- rdbuf = make([]byte, 8)
- )
- for {
- var hdr *Header
- hdr, err = tr.Next()
+ for _, v := range vectors {
+ t.Run(path.Base(v.file), func(t *testing.T) {
+ f, err := os.Open(v.file)
if err != nil {
- if err == io.EOF {
- err = nil // Expected error
+ t.Fatalf("unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ // Capture all headers and checksums.
+ var (
+ tr = NewReader(f)
+ hdrs []*Header
+ chksums []string
+ rdbuf = make([]byte, 8)
+ )
+ for {
+ var hdr *Header
+ hdr, err = tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ err = nil // Expected error
+ }
+ break
}
- break
- }
- hdrs = append(hdrs, hdr)
+ hdrs = append(hdrs, hdr)
- if v.chksums == nil {
- continue
+ if v.chksums == nil {
+ continue
+ }
+ h := md5.New()
+ _, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
+ if err != nil {
+ break
+ }
+ chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil)))
}
- h := md5.New()
- _, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
- if err != nil {
- break
- }
- chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil)))
- }
- for j, hdr := range hdrs {
- if j >= len(v.headers) {
- t.Errorf("file %s, test %d, entry %d: unexpected header:\ngot %+v",
- v.file, i, j, *hdr)
- continue
+ for i, hdr := range hdrs {
+ if i >= len(v.headers) {
+ t.Fatalf("entry %d: unexpected header:\ngot %+v", i, *hdr)
+ continue
+ }
+ if !reflect.DeepEqual(*hdr, *v.headers[i]) {
+ t.Fatalf("entry %d: incorrect header:\ngot %+v\nwant %+v", i, *hdr, *v.headers[i])
+ }
}
- if !reflect.DeepEqual(*hdr, *v.headers[j]) {
- t.Errorf("file %s, test %d, entry %d: incorrect header:\ngot %+v\nwant %+v",
- v.file, i, j, *hdr, *v.headers[j])
+ if len(hdrs) != len(v.headers) {
+ t.Fatalf("got %d headers, want %d headers", len(hdrs), len(v.headers))
}
- }
- if len(hdrs) != len(v.headers) {
- t.Errorf("file %s, test %d: got %d headers, want %d headers",
- v.file, i, len(hdrs), len(v.headers))
- }
- for j, sum := range chksums {
- if j >= len(v.chksums) {
- t.Errorf("file %s, test %d, entry %d: unexpected sum: got %s",
- v.file, i, j, sum)
- continue
+ for i, sum := range chksums {
+ if i >= len(v.chksums) {
+ t.Fatalf("entry %d: unexpected sum: got %s", i, sum)
+ continue
+ }
+ if sum != v.chksums[i] {
+ t.Fatalf("entry %d: incorrect checksum: got %s, want %s", i, sum, v.chksums[i])
+ }
}
- if sum != v.chksums[j] {
- t.Errorf("file %s, test %d, entry %d: incorrect checksum: got %s, want %s",
- v.file, i, j, sum, v.chksums[j])
- }
- }
- if err != v.err {
- t.Errorf("file %s, test %d: unexpected error: got %v, want %v",
- v.file, i, err, v.err)
- }
- f.Close()
+ if err != v.err {
+ t.Fatalf("unexpected error: got %v, want %v", err, v.err)
+ }
+ f.Close()
+ })
}
}
func TestPartialRead(t *testing.T) {
- f, err := os.Open("testdata/gnu.tar")
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
+ type testCase struct {
+ cnt int // Number of bytes to read
+ output string // Expected value of string read
}
- defer f.Close()
-
- tr := NewReader(f)
-
- // Read the first four bytes; Next() should skip the last byte.
- hdr, err := tr.Next()
- if err != nil || hdr == nil {
- t.Fatalf("Didn't get first file: %v", err)
- }
- buf := make([]byte, 4)
- if _, err := io.ReadFull(tr, buf); err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
- t.Errorf("Contents = %v, want %v", buf, expected)
- }
-
- // Second file
- hdr, err = tr.Next()
- if err != nil || hdr == nil {
- t.Fatalf("Didn't get second file: %v", err)
- }
- buf = make([]byte, 6)
- if _, err := io.ReadFull(tr, buf); err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- if expected := []byte("Google"); !bytes.Equal(buf, expected) {
- t.Errorf("Contents = %v, want %v", buf, expected)
- }
-}
-
-func TestSparseFileReader(t *testing.T) {
vectors := []struct {
- realSize int64 // Real size of the output file
- sparseMap []sparseEntry // Input sparse map
- sparseData string // Input compact data
- expected string // Expected output data
- err error // Expected error outcome
+ file string
+ cases []testCase
}{{
- realSize: 8,
- sparseMap: []sparseEntry{
- {offset: 0, numBytes: 2},
- {offset: 5, numBytes: 3},
+ file: "testdata/gnu.tar",
+ cases: []testCase{
+ {4, "Kilt"},
+ {6, "Google"},
},
- sparseData: "abcde",
- expected: "ab\x00\x00\x00cde",
}, {
- realSize: 10,
- sparseMap: []sparseEntry{
- {offset: 0, numBytes: 2},
- {offset: 5, numBytes: 3},
+ file: "testdata/sparse-formats.tar",
+ cases: []testCase{
+ {2, "\x00G"},
+ {4, "\x00G\x00o"},
+ {6, "\x00G\x00o\x00G"},
+ {8, "\x00G\x00o\x00G\x00o"},
+ {4, "end\n"},
},
- sparseData: "abcde",
- expected: "ab\x00\x00\x00cde\x00\x00",
- }, {
- realSize: 8,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 2},
- },
- sparseData: "abcde",
- expected: "\x00abc\x00\x00de",
- }, {
- realSize: 8,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 0},
- {offset: 6, numBytes: 0},
- {offset: 6, numBytes: 2},
- },
- sparseData: "abcde",
- expected: "\x00abc\x00\x00de",
- }, {
- realSize: 10,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 2},
- },
- sparseData: "abcde",
- expected: "\x00abc\x00\x00de\x00\x00",
- }, {
- realSize: 10,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 2},
- {offset: 8, numBytes: 0},
- {offset: 8, numBytes: 0},
- {offset: 8, numBytes: 0},
- {offset: 8, numBytes: 0},
- },
- sparseData: "abcde",
- expected: "\x00abc\x00\x00de\x00\x00",
- }, {
- realSize: 2,
- sparseMap: []sparseEntry{},
- sparseData: "",
- expected: "\x00\x00",
- }, {
- realSize: -2,
- sparseMap: []sparseEntry{},
- err: ErrHeader,
- }, {
- realSize: -10,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 2},
- },
- sparseData: "abcde",
- err: ErrHeader,
- }, {
- realSize: 10,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 5},
- },
- sparseData: "abcde",
- err: ErrHeader,
- }, {
- realSize: 35,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 5},
- },
- sparseData: "abcde",
- err: io.ErrUnexpectedEOF,
- }, {
- realSize: 35,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: -5},
- },
- sparseData: "abcde",
- err: ErrHeader,
- }, {
- realSize: 35,
- sparseMap: []sparseEntry{
- {offset: math.MaxInt64, numBytes: 3},
- {offset: 6, numBytes: -5},
- },
- sparseData: "abcde",
- err: ErrHeader,
- }, {
- realSize: 10,
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 2, numBytes: 2},
- },
- sparseData: "abcde",
- err: ErrHeader,
}}
- for i, v := range vectors {
- r := bytes.NewReader([]byte(v.sparseData))
- rfr := &regFileReader{r: r, nb: int64(len(v.sparseData))}
+ for _, v := range vectors {
+ t.Run(path.Base(v.file), func(t *testing.T) {
+ f, err := os.Open(v.file)
+ if err != nil {
+ t.Fatalf("Open() error: %v", err)
+ }
+ defer f.Close()
- var (
- sfr *sparseFileReader
- err error
- buf []byte
- )
+ tr := NewReader(f)
+ for i, tc := range v.cases {
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Fatalf("entry %d, Next(): got %v, want %v", i, err, nil)
+ }
+ buf := make([]byte, tc.cnt)
+ if _, err := io.ReadFull(tr, buf); err != nil {
+ t.Fatalf("entry %d, ReadFull(): got %v, want %v", i, err, nil)
+ }
+ if string(buf) != tc.output {
+ t.Fatalf("entry %d, ReadFull(): got %q, want %q", i, string(buf), tc.output)
+ }
+ }
- sfr, err = newSparseFileReader(rfr, v.sparseMap, v.realSize)
- if err != nil {
- goto fail
- }
- if sfr.numBytes() != int64(len(v.sparseData)) {
- t.Errorf("test %d, numBytes() before reading: got %d, want %d", i, sfr.numBytes(), len(v.sparseData))
- }
- buf, err = ioutil.ReadAll(sfr)
- if err != nil {
- goto fail
- }
- if string(buf) != v.expected {
- t.Errorf("test %d, ReadAll(): got %q, want %q", i, string(buf), v.expected)
- }
- if sfr.numBytes() != 0 {
- t.Errorf("test %d, numBytes() after reading: got %d, want %d", i, sfr.numBytes(), 0)
- }
-
- fail:
- if err != v.err {
- t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
- }
- }
-}
-
-func TestReadOldGNUSparseMap(t *testing.T) {
- const (
- t00 = "00000000000\x0000000000000\x00"
- t11 = "00000000001\x0000000000001\x00"
- t12 = "00000000001\x0000000000002\x00"
- t21 = "00000000002\x0000000000001\x00"
- )
-
- mkBlk := func(size, sp0, sp1, sp2, sp3, ext string, format int) *block {
- var blk block
- copy(blk.GNU().RealSize(), size)
- copy(blk.GNU().Sparse().Entry(0), sp0)
- copy(blk.GNU().Sparse().Entry(1), sp1)
- copy(blk.GNU().Sparse().Entry(2), sp2)
- copy(blk.GNU().Sparse().Entry(3), sp3)
- copy(blk.GNU().Sparse().IsExtended(), ext)
- if format != formatUnknown {
- blk.SetFormat(format)
- }
- return &blk
- }
-
- vectors := []struct {
- data string // Input data
- rawHdr *block // Input raw header
- want []sparseEntry // Expected sparse entries to be outputted
- err error // Expected error to be returned
- }{
- {"", mkBlk("", "", "", "", "", "", formatUnknown), nil, ErrHeader},
- {"", mkBlk("1234", "fewa", "", "", "", "", formatGNU), nil, ErrHeader},
- {"", mkBlk("0031", "", "", "", "", "", formatGNU), nil, nil},
- {"", mkBlk("1234", t00, t11, "", "", "", formatGNU),
- []sparseEntry{{0, 0}, {1, 1}}, nil},
- {"", mkBlk("1234", t11, t12, t21, t11, "", formatGNU),
- []sparseEntry{{1, 1}, {1, 2}, {2, 1}, {1, 1}}, nil},
- {"", mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU),
- []sparseEntry{}, io.ErrUnexpectedEOF},
- {t11 + t11,
- mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU),
- []sparseEntry{}, io.ErrUnexpectedEOF},
- {t11 + t21 + strings.Repeat("\x00", 512),
- mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU),
- []sparseEntry{{1, 1}, {1, 2}, {2, 1}, {1, 1}, {1, 1}, {2, 1}}, nil},
- }
-
- for i, v := range vectors {
- tr := Reader{r: strings.NewReader(v.data)}
- hdr := new(Header)
- got, err := tr.readOldGNUSparseMap(hdr, v.rawHdr)
- if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
- t.Errorf("test %d, readOldGNUSparseMap(...): got %v, want %v", i, got, v.want)
- }
- if err != v.err {
- t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
- }
- }
-}
-
-func TestReadGNUSparseMap0x1(t *testing.T) {
- const (
- maxUint = ^uint(0)
- maxInt = int(maxUint >> 1)
- )
- var (
- big1 = fmt.Sprintf("%d", int64(maxInt))
- big2 = fmt.Sprintf("%d", (int64(maxInt)/2)+1)
- big3 = fmt.Sprintf("%d", (int64(maxInt) / 3))
- )
-
- vectors := []struct {
- extHdrs map[string]string // Input data
- sparseMap []sparseEntry // Expected sparse entries to be outputted
- err error // Expected errors that may be raised
- }{{
- extHdrs: map[string]string{paxGNUSparseNumBlocks: "-4"},
- err: ErrHeader,
- }, {
- extHdrs: map[string]string{paxGNUSparseNumBlocks: "fee "},
- err: ErrHeader,
- }, {
- extHdrs: map[string]string{
- paxGNUSparseNumBlocks: big1,
- paxGNUSparseMap: "0,5,10,5,20,5,30,5",
- },
- err: ErrHeader,
- }, {
- extHdrs: map[string]string{
- paxGNUSparseNumBlocks: big2,
- paxGNUSparseMap: "0,5,10,5,20,5,30,5",
- },
- err: ErrHeader,
- }, {
- extHdrs: map[string]string{
- paxGNUSparseNumBlocks: big3,
- paxGNUSparseMap: "0,5,10,5,20,5,30,5",
- },
- err: ErrHeader,
- }, {
- extHdrs: map[string]string{
- paxGNUSparseNumBlocks: "4",
- paxGNUSparseMap: "0.5,5,10,5,20,5,30,5",
- },
- err: ErrHeader,
- }, {
- extHdrs: map[string]string{
- paxGNUSparseNumBlocks: "4",
- paxGNUSparseMap: "0,5.5,10,5,20,5,30,5",
- },
- err: ErrHeader,
- }, {
- extHdrs: map[string]string{
- paxGNUSparseNumBlocks: "4",
- paxGNUSparseMap: "0,fewafewa.5,fewafw,5,20,5,30,5",
- },
- err: ErrHeader,
- }, {
- extHdrs: map[string]string{
- paxGNUSparseNumBlocks: "4",
- paxGNUSparseMap: "0,5,10,5,20,5,30,5",
- },
- sparseMap: []sparseEntry{{0, 5}, {10, 5}, {20, 5}, {30, 5}},
- }}
-
- for i, v := range vectors {
- sp, err := readGNUSparseMap0x1(v.extHdrs)
- if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) {
- t.Errorf("test %d, readGNUSparseMap0x1(...): got %v, want %v", i, sp, v.sparseMap)
- }
- if err != v.err {
- t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
- }
- }
-}
-
-func TestReadGNUSparseMap1x0(t *testing.T) {
- sp := []sparseEntry{{1, 2}, {3, 4}}
- for i := 0; i < 98; i++ {
- sp = append(sp, sparseEntry{54321, 12345})
- }
-
- vectors := []struct {
- input string // Input data
- sparseMap []sparseEntry // Expected sparse entries to be outputted
- cnt int // Expected number of bytes read
- err error // Expected errors that may be raised
- }{{
- input: "",
- cnt: 0,
- err: io.ErrUnexpectedEOF,
- }, {
- input: "ab",
- cnt: 2,
- err: io.ErrUnexpectedEOF,
- }, {
- input: strings.Repeat("\x00", 512),
- cnt: 512,
- err: io.ErrUnexpectedEOF,
- }, {
- input: strings.Repeat("\x00", 511) + "\n",
- cnt: 512,
- err: ErrHeader,
- }, {
- input: strings.Repeat("\n", 512),
- cnt: 512,
- err: ErrHeader,
- }, {
- input: "0\n" + strings.Repeat("\x00", 510) + strings.Repeat("a", 512),
- sparseMap: []sparseEntry{},
- cnt: 512,
- }, {
- input: strings.Repeat("0", 512) + "0\n" + strings.Repeat("\x00", 510),
- sparseMap: []sparseEntry{},
- cnt: 1024,
- }, {
- input: strings.Repeat("0", 1024) + "1\n2\n3\n" + strings.Repeat("\x00", 506),
- sparseMap: []sparseEntry{{2, 3}},
- cnt: 1536,
- }, {
- input: strings.Repeat("0", 1024) + "1\n2\n\n" + strings.Repeat("\x00", 509),
- cnt: 1536,
- err: ErrHeader,
- }, {
- input: strings.Repeat("0", 1024) + "1\n2\n" + strings.Repeat("\x00", 508),
- cnt: 1536,
- err: io.ErrUnexpectedEOF,
- }, {
- input: "-1\n2\n\n" + strings.Repeat("\x00", 506),
- cnt: 512,
- err: ErrHeader,
- }, {
- input: "1\nk\n2\n" + strings.Repeat("\x00", 506),
- cnt: 512,
- err: ErrHeader,
- }, {
- input: "100\n1\n2\n3\n4\n" + strings.Repeat("54321\n0000000000000012345\n", 98) + strings.Repeat("\x00", 512),
- cnt: 2560,
- sparseMap: sp,
- }}
-
- for i, v := range vectors {
- r := strings.NewReader(v.input)
- sp, err := readGNUSparseMap1x0(r)
- if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) {
- t.Errorf("test %d, readGNUSparseMap1x0(...): got %v, want %v", i, sp, v.sparseMap)
- }
- if numBytes := len(v.input) - r.Len(); numBytes != v.cnt {
- t.Errorf("test %d, bytes read: got %v, want %v", i, numBytes, v.cnt)
- }
- if err != v.err {
- t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
- }
+ if _, err := tr.Next(); err != io.EOF {
+ t.Fatalf("Next(): got %v, want EOF", err)
+ }
+ })
}
}
@@ -950,17 +864,17 @@ func TestReadTruncation(t *testing.T) {
}
cnt++
if s2 == "manual" {
- if _, err = io.Copy(ioutil.Discard, tr); err != nil {
+ if _, err = tr.writeTo(ioutil.Discard); err != nil {
break
}
}
}
if err != v.err {
- t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %v, want %v",
+ t.Errorf("test %d, NewReader(%s) with %s discard: got %v, want %v",
i, s1, s2, err, v.err)
}
if cnt != v.cnt {
- t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %d headers, want %d headers",
+ t.Errorf("test %d, NewReader(%s) with %s discard: got %d headers, want %d headers",
i, s1, s2, cnt, v.cnt)
}
}
@@ -1025,12 +939,18 @@ func TestMergePAX(t *testing.T) {
Name: "a/b/c",
Uid: 1000,
ModTime: time.Unix(1350244992, 23960108),
+ PAXRecords: map[string]string{
+ "path": "a/b/c",
+ "uid": "1000",
+ "mtime": "1350244992.023960108",
+ },
},
ok: true,
}, {
in: map[string]string{
"gid": "gtgergergersagersgers",
},
+ ok: false,
}, {
in: map[string]string{
"missing": "missing",
@@ -1038,6 +958,10 @@ func TestMergePAX(t *testing.T) {
},
want: &Header{
Xattrs: map[string]string{"key": "value"},
+ PAXRecords: map[string]string{
+ "missing": "missing",
+ "SCHILY.xattr.key": "value",
+ },
},
ok: true,
}}
@@ -1070,7 +994,7 @@ func TestParsePAX(t *testing.T) {
{"13 key1=haha\n13 key2=nana\n13 key3=kaka\n",
map[string]string{"key1": "haha", "key2": "nana", "key3": "kaka"}, true},
{"13 key1=val1\n13 key2=val2\n8 key1=\n",
- map[string]string{"key2": "val2"}, true},
+ map[string]string{"key1": "", "key2": "val2"}, true},
{"22 GNU.sparse.size=10\n26 GNU.sparse.numblocks=2\n" +
"23 GNU.sparse.offset=1\n25 GNU.sparse.numbytes=2\n" +
"23 GNU.sparse.offset=3\n25 GNU.sparse.numbytes=4\n",
@@ -1087,10 +1011,600 @@ func TestParsePAX(t *testing.T) {
r := strings.NewReader(v.in)
got, err := parsePAX(r)
if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
- t.Errorf("test %d, parsePAX(...):\ngot %v\nwant %v", i, got, v.want)
+ t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
}
if ok := err == nil; ok != v.ok {
- t.Errorf("test %d, parsePAX(...): got %v, want %v", i, ok, v.ok)
+ t.Errorf("test %d, parsePAX(): got %v, want %v", i, ok, v.ok)
+ }
+ }
+}
+
+func TestReadOldGNUSparseMap(t *testing.T) {
+ populateSparseMap := func(sa sparseArray, sps []string) []string {
+ for i := 0; len(sps) > 0 && i < sa.MaxEntries(); i++ {
+ copy(sa.Entry(i), sps[0])
+ sps = sps[1:]
+ }
+ if len(sps) > 0 {
+ copy(sa.IsExtended(), "\x80")
+ }
+ return sps
+ }
+
+ makeInput := func(format Format, size string, sps ...string) (out []byte) {
+ // Write the initial GNU header.
+ var blk block
+ gnu := blk.GNU()
+ sparse := gnu.Sparse()
+ copy(gnu.RealSize(), size)
+ sps = populateSparseMap(sparse, sps)
+ if format != FormatUnknown {
+ blk.SetFormat(format)
+ }
+ out = append(out, blk[:]...)
+
+ // Write extended sparse blocks.
+ for len(sps) > 0 {
+ var blk block
+ sps = populateSparseMap(blk.Sparse(), sps)
+ out = append(out, blk[:]...)
+ }
+ return out
+ }
+
+ makeSparseStrings := func(sp []sparseEntry) (out []string) {
+ var f formatter
+ for _, s := range sp {
+ var b [24]byte
+ f.formatNumeric(b[:12], s.Offset)
+ f.formatNumeric(b[12:], s.Length)
+ out = append(out, string(b[:]))
+ }
+ return out
+ }
+
+ vectors := []struct {
+ input []byte
+ wantMap sparseDatas
+ wantSize int64
+ wantErr error
+ }{{
+ input: makeInput(FormatUnknown, ""),
+ wantErr: ErrHeader,
+ }, {
+ input: makeInput(FormatGNU, "1234", "fewa"),
+ wantSize: 01234,
+ wantErr: ErrHeader,
+ }, {
+ input: makeInput(FormatGNU, "0031"),
+ wantSize: 031,
+ }, {
+ input: makeInput(FormatGNU, "80"),
+ wantErr: ErrHeader,
+ }, {
+ input: makeInput(FormatGNU, "1234",
+ makeSparseStrings(sparseDatas{{0, 0}, {1, 1}})...),
+ wantMap: sparseDatas{{0, 0}, {1, 1}},
+ wantSize: 01234,
+ }, {
+ input: makeInput(FormatGNU, "1234",
+ append(makeSparseStrings(sparseDatas{{0, 0}, {1, 1}}), []string{"", "blah"}...)...),
+ wantMap: sparseDatas{{0, 0}, {1, 1}},
+ wantSize: 01234,
+ }, {
+ input: makeInput(FormatGNU, "3333",
+ makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}})...),
+ wantMap: sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}},
+ wantSize: 03333,
+ }, {
+ input: makeInput(FormatGNU, "",
+ append(append(
+ makeSparseStrings(sparseDatas{{0, 1}, {2, 1}}),
+ []string{"", ""}...),
+ makeSparseStrings(sparseDatas{{4, 1}, {6, 1}})...)...),
+ wantMap: sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}},
+ }, {
+ input: makeInput(FormatGNU, "",
+ makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...)[:blockSize],
+ wantErr: io.ErrUnexpectedEOF,
+ }, {
+ input: makeInput(FormatGNU, "",
+ makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...)[:3*blockSize/2],
+ wantErr: io.ErrUnexpectedEOF,
+ }, {
+ input: makeInput(FormatGNU, "",
+ makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...),
+ wantMap: sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}},
+ }, {
+ input: makeInput(FormatGNU, "",
+ makeSparseStrings(sparseDatas{{10 << 30, 512}, {20 << 30, 512}})...),
+ wantMap: sparseDatas{{10 << 30, 512}, {20 << 30, 512}},
+ }}
+
+ for i, v := range vectors {
+ var blk block
+ var hdr Header
+ v.input = v.input[copy(blk[:], v.input):]
+ tr := Reader{r: bytes.NewReader(v.input)}
+ got, err := tr.readOldGNUSparseMap(&hdr, &blk)
+ if !equalSparseEntries(got, v.wantMap) {
+ t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
+ }
+ if err != v.wantErr {
+ t.Errorf("test %d, readOldGNUSparseMap() = %v, want %v", i, err, v.wantErr)
+ }
+ if hdr.Size != v.wantSize {
+ t.Errorf("test %d, Header.Size = %d, want %d", i, hdr.Size, v.wantSize)
+ }
+ }
+}
+
+func TestReadGNUSparsePAXHeaders(t *testing.T) {
+ padInput := func(s string) string {
+ return s + string(zeroBlock[:blockPadding(int64(len(s)))])
+ }
+
+ vectors := []struct {
+ inputData string
+ inputHdrs map[string]string
+ wantMap sparseDatas
+ wantSize int64
+ wantName string
+ wantErr error
+ }{{
+ inputHdrs: nil,
+ wantErr: nil,
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseNumBlocks: strconv.FormatInt(math.MaxInt64, 10),
+ paxGNUSparseMap: "0,1,2,3",
+ },
+ wantErr: ErrHeader,
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseNumBlocks: "4\x00",
+ paxGNUSparseMap: "0,1,2,3",
+ },
+ wantErr: ErrHeader,
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseNumBlocks: "4",
+ paxGNUSparseMap: "0,1,2,3",
+ },
+ wantErr: ErrHeader,
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseNumBlocks: "2",
+ paxGNUSparseMap: "0,1,2,3",
+ },
+ wantMap: sparseDatas{{0, 1}, {2, 3}},
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseNumBlocks: "2",
+ paxGNUSparseMap: "0, 1,2,3",
+ },
+ wantErr: ErrHeader,
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseNumBlocks: "2",
+ paxGNUSparseMap: "0,1,02,3",
+ paxGNUSparseRealSize: "4321",
+ },
+ wantMap: sparseDatas{{0, 1}, {2, 3}},
+ wantSize: 4321,
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseNumBlocks: "2",
+ paxGNUSparseMap: "0,one1,2,3",
+ },
+ wantErr: ErrHeader,
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseMajor: "0",
+ paxGNUSparseMinor: "0",
+ paxGNUSparseNumBlocks: "2",
+ paxGNUSparseMap: "0,1,2,3",
+ paxGNUSparseSize: "1234",
+ paxGNUSparseRealSize: "4321",
+ paxGNUSparseName: "realname",
+ },
+ wantMap: sparseDatas{{0, 1}, {2, 3}},
+ wantSize: 1234,
+ wantName: "realname",
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseMajor: "0",
+ paxGNUSparseMinor: "0",
+ paxGNUSparseNumBlocks: "1",
+ paxGNUSparseMap: "10737418240,512",
+ paxGNUSparseSize: "10737418240",
+ paxGNUSparseName: "realname",
+ },
+ wantMap: sparseDatas{{10737418240, 512}},
+ wantSize: 10737418240,
+ wantName: "realname",
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseMajor: "0",
+ paxGNUSparseMinor: "0",
+ paxGNUSparseNumBlocks: "0",
+ paxGNUSparseMap: "",
+ },
+ wantMap: sparseDatas{},
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseMajor: "0",
+ paxGNUSparseMinor: "1",
+ paxGNUSparseNumBlocks: "4",
+ paxGNUSparseMap: "0,5,10,5,20,5,30,5",
+ },
+ wantMap: sparseDatas{{0, 5}, {10, 5}, {20, 5}, {30, 5}},
+ }, {
+ inputHdrs: map[string]string{
+ paxGNUSparseMajor: "1",
+ paxGNUSparseMinor: "0",
+ paxGNUSparseNumBlocks: "4",
+ paxGNUSparseMap: "0,5,10,5,20,5,30,5",
+ },
+ wantErr: io.ErrUnexpectedEOF,
+ }, {
+ inputData: padInput("0\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantMap: sparseDatas{},
+ }, {
+ inputData: padInput("0\n")[:blockSize-1] + "#",
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantMap: sparseDatas{},
+ }, {
+ inputData: padInput("0"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantErr: io.ErrUnexpectedEOF,
+ }, {
+ inputData: padInput("ab\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantErr: ErrHeader,
+ }, {
+ inputData: padInput("1\n2\n3\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantMap: sparseDatas{{2, 3}},
+ }, {
+ inputData: padInput("1\n2\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantErr: io.ErrUnexpectedEOF,
+ }, {
+ inputData: padInput("1\n2\n\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantErr: ErrHeader,
+ }, {
+ inputData: string(zeroBlock[:]) + padInput("0\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantErr: ErrHeader,
+ }, {
+ inputData: strings.Repeat("0", blockSize) + padInput("1\n5\n1\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantMap: sparseDatas{{5, 1}},
+ }, {
+ inputData: padInput(fmt.Sprintf("%d\n", int64(math.MaxInt64))),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantErr: ErrHeader,
+ }, {
+ inputData: padInput(strings.Repeat("0", 300) + "1\n" + strings.Repeat("0", 1000) + "5\n" + strings.Repeat("0", 800) + "2\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantMap: sparseDatas{{5, 2}},
+ }, {
+ inputData: padInput("2\n10737418240\n512\n21474836480\n512\n"),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantMap: sparseDatas{{10737418240, 512}, {21474836480, 512}},
+ }, {
+ inputData: padInput("100\n" + func() string {
+ var ss []string
+ for i := 0; i < 100; i++ {
+ ss = append(ss, fmt.Sprintf("%d\n%d\n", int64(i)<<30, 512))
+ }
+ return strings.Join(ss, "")
+ }()),
+ inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
+ wantMap: func() (spd sparseDatas) {
+ for i := 0; i < 100; i++ {
+ spd = append(spd, sparseEntry{int64(i) << 30, 512})
+ }
+ return spd
+ }(),
+ }}
+
+ for i, v := range vectors {
+ var hdr Header
+ hdr.PAXRecords = v.inputHdrs
+ r := strings.NewReader(v.inputData + "#") // Add canary byte
+ tr := Reader{curr: &regFileReader{r, int64(r.Len())}}
+ got, err := tr.readGNUSparsePAXHeaders(&hdr)
+ if !equalSparseEntries(got, v.wantMap) {
+ t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
+ }
+ if err != v.wantErr {
+ t.Errorf("test %d, readGNUSparsePAXHeaders() = %v, want %v", i, err, v.wantErr)
+ }
+ if hdr.Size != v.wantSize {
+ t.Errorf("test %d, Header.Size = %d, want %d", i, hdr.Size, v.wantSize)
+ }
+ if hdr.Name != v.wantName {
+ t.Errorf("test %d, Header.Name = %s, want %s", i, hdr.Name, v.wantName)
+ }
+ if v.wantErr == nil && r.Len() == 0 {
+ t.Errorf("test %d, canary byte unexpectedly consumed", i)
+ }
+ }
+}
+
+// testNonEmptyReader wraps an io.Reader and ensures that
+// Read is never called with an empty buffer.
+type testNonEmptyReader struct{ io.Reader }
+
+func (r testNonEmptyReader) Read(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, errors.New("unexpected empty Read call")
+ }
+ return r.Reader.Read(b)
+}
+
+func TestFileReader(t *testing.T) {
+ type (
+ testRead struct { // Read(cnt) == (wantStr, wantErr)
+ cnt int
+ wantStr string
+ wantErr error
+ }
+ testWriteTo struct { // WriteTo(testFile{ops}) == (wantCnt, wantErr)
+ ops fileOps
+ wantCnt int64
+ wantErr error
+ }
+ testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt
+ wantLCnt int64
+ wantPCnt int64
+ }
+ testFnc interface{} // testRead | testWriteTo | testRemaining
+ )
+
+ type (
+ makeReg struct {
+ str string
+ size int64
+ }
+ makeSparse struct {
+ makeReg makeReg
+ spd sparseDatas
+ size int64
+ }
+ fileMaker interface{} // makeReg | makeSparse
+ )
+
+ vectors := []struct {
+ maker fileMaker
+ tests []testFnc
+ }{{
+ maker: makeReg{"", 0},
+ tests: []testFnc{
+ testRemaining{0, 0},
+ testRead{0, "", io.EOF},
+ testRead{1, "", io.EOF},
+ testWriteTo{nil, 0, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeReg{"", 1},
+ tests: []testFnc{
+ testRemaining{1, 1},
+ testRead{5, "", io.ErrUnexpectedEOF},
+ testWriteTo{nil, 0, io.ErrUnexpectedEOF},
+ testRemaining{1, 1},
+ },
+ }, {
+ maker: makeReg{"hello", 5},
+ tests: []testFnc{
+ testRemaining{5, 5},
+ testRead{5, "hello", io.EOF},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeReg{"hello, world", 50},
+ tests: []testFnc{
+ testRemaining{50, 50},
+ testRead{7, "hello, ", nil},
+ testRemaining{43, 43},
+ testRead{5, "world", nil},
+ testRemaining{38, 38},
+ testWriteTo{nil, 0, io.ErrUnexpectedEOF},
+ testRead{1, "", io.ErrUnexpectedEOF},
+ testRemaining{38, 38},
+ },
+ }, {
+ maker: makeReg{"hello, world", 5},
+ tests: []testFnc{
+ testRemaining{5, 5},
+ testRead{0, "", nil},
+ testRead{4, "hell", nil},
+ testRemaining{1, 1},
+ testWriteTo{fileOps{"o"}, 1, nil},
+ testRemaining{0, 0},
+ testWriteTo{nil, 0, nil},
+ testRead{0, "", io.EOF},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 8},
+ tests: []testFnc{
+ testRemaining{8, 5},
+ testRead{3, "ab\x00", nil},
+ testRead{10, "\x00\x00cde", io.EOF},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 8},
+ tests: []testFnc{
+ testRemaining{8, 5},
+ testWriteTo{fileOps{"ab", int64(3), "cde"}, 8, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 10},
+ tests: []testFnc{
+ testRemaining{10, 5},
+ testRead{100, "ab\x00\x00\x00cde\x00\x00", io.EOF},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abc", 5}, sparseDatas{{0, 2}, {5, 3}}, 10},
+ tests: []testFnc{
+ testRemaining{10, 5},
+ testRead{100, "ab\x00\x00\x00c", io.ErrUnexpectedEOF},
+ testRemaining{4, 2},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 8},
+ tests: []testFnc{
+ testRemaining{8, 5},
+ testRead{8, "\x00abc\x00\x00de", io.EOF},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 0}, {6, 0}, {6, 2}}, 8},
+ tests: []testFnc{
+ testRemaining{8, 5},
+ testRead{8, "\x00abc\x00\x00de", io.EOF},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 0}, {6, 0}, {6, 2}}, 8},
+ tests: []testFnc{
+ testRemaining{8, 5},
+ testWriteTo{fileOps{int64(1), "abc", int64(2), "de"}, 8, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 10},
+ tests: []testFnc{
+ testRead{100, "\x00abc\x00\x00de\x00\x00", io.EOF},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 10},
+ tests: []testFnc{
+ testWriteTo{fileOps{int64(1), "abc", int64(2), "de", int64(1), "\x00"}, 10, nil},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}, {8, 0}, {8, 0}, {8, 0}, {8, 0}}, 10},
+ tests: []testFnc{
+ testRead{100, "\x00abc\x00\x00de\x00\x00", io.EOF},
+ },
+ }, {
+ maker: makeSparse{makeReg{"", 0}, sparseDatas{}, 2},
+ tests: []testFnc{
+ testRead{100, "\x00\x00", io.EOF},
+ },
+ }, {
+ maker: makeSparse{makeReg{"", 8}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRead{100, "\x00", io.ErrUnexpectedEOF},
+ },
+ }, {
+ maker: makeSparse{makeReg{"ab", 2}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRead{100, "\x00ab", errMissData},
+ },
+ }, {
+ maker: makeSparse{makeReg{"ab", 8}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRead{100, "\x00ab", io.ErrUnexpectedEOF},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abc", 3}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRead{100, "\x00abc\x00\x00", errMissData},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abc", 8}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRead{100, "\x00abc\x00\x00", io.ErrUnexpectedEOF},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRead{100, "\x00abc\x00\x00de", errMissData},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testWriteTo{fileOps{int64(1), "abc", int64(2), "de"}, 8, errMissData},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcde", 8}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRead{100, "\x00abc\x00\x00de", io.ErrUnexpectedEOF},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcdefghEXTRA", 13}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRemaining{15, 13},
+ testRead{100, "\x00abc\x00\x00defgh\x00\x00\x00\x00", errUnrefData},
+ testWriteTo{nil, 0, errUnrefData},
+ testRemaining{0, 5},
+ },
+ }, {
+ maker: makeSparse{makeReg{"abcdefghEXTRA", 13}, sparseDatas{{1, 3}, {6, 5}}, 15},
+ tests: []testFnc{
+ testRemaining{15, 13},
+ testWriteTo{fileOps{int64(1), "abc", int64(2), "defgh", int64(4)}, 15, errUnrefData},
+ testRead{100, "", errUnrefData},
+ testRemaining{0, 5},
+ },
+ }}
+
+ for i, v := range vectors {
+ var fr fileReader
+ switch maker := v.maker.(type) {
+ case makeReg:
+ r := testNonEmptyReader{strings.NewReader(maker.str)}
+ fr = &regFileReader{r, maker.size}
+ case makeSparse:
+ if !validateSparseEntries(maker.spd, maker.size) {
+ t.Fatalf("invalid sparse map: %v", maker.spd)
+ }
+ sph := invertSparseEntries(maker.spd, maker.size)
+ r := testNonEmptyReader{strings.NewReader(maker.makeReg.str)}
+ fr = &regFileReader{r, maker.makeReg.size}
+ fr = &sparseFileReader{fr, sph, 0}
+ default:
+ t.Fatalf("test %d, unknown make operation: %T", i, maker)
+ }
+
+ for j, tf := range v.tests {
+ switch tf := tf.(type) {
+ case testRead:
+ b := make([]byte, tf.cnt)
+ n, err := fr.Read(b)
+ if got := string(b[:n]); got != tf.wantStr || err != tf.wantErr {
+ t.Errorf("test %d.%d, Read(%d):\ngot (%q, %v)\nwant (%q, %v)", i, j, tf.cnt, got, err, tf.wantStr, tf.wantErr)
+ }
+ case testWriteTo:
+ f := &testFile{ops: tf.ops}
+ got, err := fr.WriteTo(f)
+ if _, ok := err.(testError); ok {
+ t.Errorf("test %d.%d, WriteTo(): %v", i, j, err)
+ } else if got != tf.wantCnt || err != tf.wantErr {
+ t.Errorf("test %d.%d, WriteTo() = (%d, %v), want (%d, %v)", i, j, got, err, tf.wantCnt, tf.wantErr)
+ }
+ if len(f.ops) > 0 {
+ t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
+ }
+ case testRemaining:
+ if got := fr.LogicalRemaining(); got != tf.wantLCnt {
+ t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
+ }
+ if got := fr.PhysicalRemaining(); got != tf.wantPCnt {
+ t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
+ }
+ default:
+ t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
+ }
}
}
}
diff --git a/src/archive/tar/stat_atim.go b/src/archive/tar/stat_actime1.go
similarity index 100%
rename from src/archive/tar/stat_atim.go
rename to src/archive/tar/stat_actime1.go
diff --git a/src/archive/tar/stat_atimespec.go b/src/archive/tar/stat_actime2.go
similarity index 100%
rename from src/archive/tar/stat_atimespec.go
rename to src/archive/tar/stat_actime2.go
diff --git a/src/archive/tar/stat_unix.go b/src/archive/tar/stat_unix.go
index cb843db4cfd..868105f338e 100644
--- a/src/archive/tar/stat_unix.go
+++ b/src/archive/tar/stat_unix.go
@@ -8,6 +8,10 @@ package tar
import (
"os"
+ "os/user"
+ "runtime"
+ "strconv"
+ "sync"
"syscall"
)
@@ -15,6 +19,10 @@ func init() {
sysStat = statUnix
}
+// userMap and groupMap caches UID and GID lookups for performance reasons.
+// The downside is that renaming uname or gname by the OS never takes effect.
+var userMap, groupMap sync.Map // map[int]string
+
func statUnix(fi os.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
@@ -22,11 +30,67 @@ func statUnix(fi os.FileInfo, h *Header) error {
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
- // TODO(bradfitz): populate username & group. os/user
- // doesn't cache LookupId lookups, and lacks group
- // lookup functions.
+
+ // Best effort at populating Uname and Gname.
+ // The os/user functions may fail for any number of reasons
+ // (not implemented on that platform, cgo not enabled, etc).
+ if u, ok := userMap.Load(h.Uid); ok {
+ h.Uname = u.(string)
+ } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
+ h.Uname = u.Username
+ userMap.Store(h.Uid, h.Uname)
+ }
+ if g, ok := groupMap.Load(h.Gid); ok {
+ h.Gname = g.(string)
+ } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
+ h.Gname = g.Name
+ groupMap.Store(h.Gid, h.Gname)
+ }
+
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
- // TODO(bradfitz): major/minor device numbers?
+
+ // Best effort at populating Devmajor and Devminor.
+ if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
+ dev := uint64(sys.Rdev) // May be int32 or uint32
+ switch runtime.GOOS {
+ case "linux":
+ // Copied from golang.org/x/sys/unix/dev_linux.go.
+ major := uint32((dev & 0x00000000000fff00) >> 8)
+ major |= uint32((dev & 0xfffff00000000000) >> 32)
+ minor := uint32((dev & 0x00000000000000ff) >> 0)
+ minor |= uint32((dev & 0x00000ffffff00000) >> 12)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "darwin":
+ // Copied from golang.org/x/sys/unix/dev_darwin.go.
+ major := uint32((dev >> 24) & 0xff)
+ minor := uint32(dev & 0xffffff)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "dragonfly":
+ // Copied from golang.org/x/sys/unix/dev_dragonfly.go.
+ major := uint32((dev >> 8) & 0xff)
+ minor := uint32(dev & 0xffff00ff)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "freebsd":
+ // Copied from golang.org/x/sys/unix/dev_freebsd.go.
+ major := uint32((dev >> 8) & 0xff)
+ minor := uint32(dev & 0xffff00ff)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "netbsd":
+ // Copied from golang.org/x/sys/unix/dev_netbsd.go.
+ major := uint32((dev & 0x000fff00) >> 8)
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xfff00000) >> 12)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ case "openbsd":
+ // Copied from golang.org/x/sys/unix/dev_openbsd.go.
+ major := uint32((dev & 0x0000ff00) >> 8)
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xffff0000) >> 8)
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
+ default:
+ // TODO: Implement solaris (see https://golang.org/issue/8106)
+ }
+ }
return nil
}
diff --git a/src/archive/tar/strconv.go b/src/archive/tar/strconv.go
index bb5b51c02de..d144485a492 100644
--- a/src/archive/tar/strconv.go
+++ b/src/archive/tar/strconv.go
@@ -12,26 +12,34 @@ import (
"time"
)
+// hasNUL reports whether the NUL character exists within s.
+func hasNUL(s string) bool {
+ return strings.IndexByte(s, 0) >= 0
+}
+
+// isASCII reports whether the input is an ASCII C-style string.
func isASCII(s string) bool {
for _, c := range s {
- if c >= 0x80 {
+ if c >= 0x80 || c == 0x00 {
return false
}
}
return true
}
+// toASCII converts the input to an ASCII C-style string.
+// This a best effort conversion, so invalid characters are dropped.
func toASCII(s string) string {
if isASCII(s) {
return s
}
- var buf bytes.Buffer
+ b := make([]byte, 0, len(s))
for _, c := range s {
- if c < 0x80 {
- buf.WriteByte(byte(c))
+ if c < 0x80 && c != 0x00 {
+ b = append(b, byte(c))
}
}
- return buf.String()
+ return string(b)
}
type parser struct {
@@ -45,23 +53,28 @@ type formatter struct {
// parseString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func (*parser) parseString(b []byte) string {
- n := 0
- for n < len(b) && b[n] != 0 {
- n++
+ if i := bytes.IndexByte(b, 0); i >= 0 {
+ return string(b[:i])
}
- return string(b[0:n])
+ return string(b)
}
-// Write s into b, terminating it with a NUL if there is room.
+// formatString copies s into b, NUL-terminating if possible.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
- return
}
- ascii := toASCII(s)
- copy(b, ascii)
- if len(ascii) < len(b) {
- b[len(ascii)] = 0
+ copy(b, s)
+ if len(s) < len(b) {
+ b[len(s)] = 0
+ }
+
+ // Some buggy readers treat regular files with a trailing slash
+ // in the V7 path field as a directory even though the full path
+ // recorded elsewhere (e.g., via PAX record) contains no trailing slash.
+ if len(s) > len(b) && b[len(b)-1] == '/' {
+ n := len(strings.TrimRight(s[:len(b)], "/"))
+ b[n] = 0 // Replace trailing slash with NUL terminator
}
}
@@ -73,7 +86,7 @@ func (f *formatter) formatString(b []byte, s string) {
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
- var binBits = uint(n-1) * 8
+ binBits := uint(n-1) * 8
return n >= 9 || (x >= -1<= 0; i-- {
b[i] = byte(x)
@@ -155,6 +174,11 @@ func (p *parser) parseOctal(b []byte) int64 {
}
func (f *formatter) formatOctal(b []byte, x int64) {
+ if !fitsInOctal(len(b), x) {
+ x = 0 // Last resort, just write zero
+ f.err = ErrFieldTooLong
+ }
+
s := strconv.FormatInt(x, 8)
// Add leading zeros, but leave room for a NUL.
if n := len(b) - len(s) - 1; n > 0 {
@@ -163,6 +187,13 @@ func (f *formatter) formatOctal(b []byte, x int64) {
f.formatString(b, s)
}
+// fitsInOctal reports whether the integer x fits in a field n-bytes long
+// using octal encoding with the appropriate NUL terminator.
+func fitsInOctal(n int, x int64) bool {
+ octBits := uint(n-1) * 3
+ return x >= 0 && (n >= 22 || x < 1< 0 && ss[0] == '-' {
- return time.Unix(secs, -1*int64(nsecs)), nil // Negative correction
+ return time.Unix(secs, -1*nsecs), nil // Negative correction
}
- return time.Unix(secs, int64(nsecs)), nil
+ return time.Unix(secs, nsecs), nil
}
-// TODO(dsnet): Implement formatPAXTime.
+// formatPAXTime converts ts into a time of the form %d.%d as described in the
+// PAX specification. This function is capable of negative timestamps.
+func formatPAXTime(ts time.Time) (s string) {
+ secs, nsecs := ts.Unix(), ts.Nanosecond()
+ if nsecs == 0 {
+ return strconv.FormatInt(secs, 10)
+ }
+
+ // If seconds is negative, then perform correction.
+ sign := ""
+ if secs < 0 {
+ sign = "-" // Remember sign
+ secs = -(secs + 1) // Add a second to secs
+ nsecs = -(nsecs - 1E9) // Take that second away from nsecs
+ }
+ return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
+}
// parsePAXRecord parses the input PAX record string into a key-value pair.
// If parsing is successful, it will slice off the currently read record and
// return the remainder as r.
-//
-// A PAX record is of the following form:
-// "%d %s=%s\n" % (size, key, value)
func parsePAXRecord(s string) (k, v, r string, err error) {
// The size field ends at the first space.
sp := strings.IndexByte(s, ' ')
@@ -232,21 +276,51 @@ func parsePAXRecord(s string) (k, v, r string, err error) {
if eq == -1 {
return "", "", s, ErrHeader
}
- return rec[:eq], rec[eq+1:], rem, nil
+ k, v = rec[:eq], rec[eq+1:]
+
+ if !validPAXRecord(k, v) {
+ return "", "", s, ErrHeader
+ }
+ return k, v, rem, nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
-func formatPAXRecord(k, v string) string {
+func formatPAXRecord(k, v string) (string, error) {
+ if !validPAXRecord(k, v) {
+ return "", ErrHeader
+ }
+
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
- record := fmt.Sprintf("%d %s=%s\n", size, k, v)
+ record := strconv.Itoa(size) + " " + k + "=" + v + "\n"
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
- record = fmt.Sprintf("%d %s=%s\n", size, k, v)
+ record = strconv.Itoa(size) + " " + k + "=" + v + "\n"
+ }
+ return record, nil
+}
+
+// validPAXRecord reports whether the key-value pair is valid where each
+// record is formatted as:
+// "%d %s=%s\n" % (size, key, value)
+//
+// Keys and values should be UTF-8, but the number of bad writers out there
+// forces us to be a more liberal.
+// Thus, we only reject all keys with NUL, and only reject NULs in values
+// for the PAX version of the USTAR string fields.
+// The key must not contain an '=' character.
+func validPAXRecord(k, v string) bool {
+ if k == "" || strings.IndexByte(k, '=') >= 0 {
+ return false
+ }
+ switch k {
+ case paxPath, paxLinkpath, paxUname, paxGname:
+ return !hasNUL(v)
+ default:
+ return !hasNUL(k)
}
- return record
}
diff --git a/src/archive/tar/strconv_test.go b/src/archive/tar/strconv_test.go
index beb70938bfd..4cc388cb0f2 100644
--- a/src/archive/tar/strconv_test.go
+++ b/src/archive/tar/strconv_test.go
@@ -110,6 +110,25 @@ func TestFormatNumeric(t *testing.T) {
want string
ok bool
}{
+ // Test base-8 (octal) encoded values.
+ {0, "0\x00", true},
+ {7, "7\x00", true},
+ {8, "\x80\x08", true},
+ {077, "77\x00", true},
+ {0100, "\x80\x00\x40", true},
+ {0, "0000000\x00", true},
+ {0123, "0000123\x00", true},
+ {07654321, "7654321\x00", true},
+ {07777777, "7777777\x00", true},
+ {010000000, "\x80\x00\x00\x00\x00\x20\x00\x00", true},
+ {0, "00000000000\x00", true},
+ {000001234567, "00001234567\x00", true},
+ {076543210321, "76543210321\x00", true},
+ {012345670123, "12345670123\x00", true},
+ {077777777777, "77777777777\x00", true},
+ {0100000000000, "\x80\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00", true},
+ {math.MaxInt64, "777777777777777777777\x00", true},
+
// Test base-256 (binary) encoded values.
{-1, "\xff", true},
{-1, "\xff\xff", true},
@@ -155,6 +174,45 @@ func TestFormatNumeric(t *testing.T) {
}
}
+func TestFitsInOctal(t *testing.T) {
+ vectors := []struct {
+ input int64
+ width int
+ ok bool
+ }{
+ {-1, 1, false},
+ {-1, 2, false},
+ {-1, 3, false},
+ {0, 1, true},
+ {0 + 1, 1, false},
+ {0, 2, true},
+ {07, 2, true},
+ {07 + 1, 2, false},
+ {0, 4, true},
+ {0777, 4, true},
+ {0777 + 1, 4, false},
+ {0, 8, true},
+ {07777777, 8, true},
+ {07777777 + 1, 8, false},
+ {0, 12, true},
+ {077777777777, 12, true},
+ {077777777777 + 1, 12, false},
+ {math.MaxInt64, 22, true},
+ {012345670123, 12, true},
+ {01564164, 12, true},
+ {-012345670123, 12, false},
+ {-01564164, 12, false},
+ {-1564164, 30, false},
+ }
+
+ for _, v := range vectors {
+ ok := fitsInOctal(v.width, v.input)
+ if ok != v.ok {
+ t.Errorf("checkOctal(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
+ }
+ }
+}
+
func TestParsePAXTime(t *testing.T) {
vectors := []struct {
in string
@@ -236,6 +294,51 @@ func TestParsePAXTime(t *testing.T) {
}
}
+func TestFormatPAXTime(t *testing.T) {
+ vectors := []struct {
+ sec, nsec int64
+ want string
+ }{
+ {1350244992, 0, "1350244992"},
+ {1350244992, 300000000, "1350244992.3"},
+ {1350244992, 23960100, "1350244992.0239601"},
+ {1350244992, 23960108, "1350244992.023960108"},
+ {+1, +1E9 - 1E0, "1.999999999"},
+ {+1, +1E9 - 1E3, "1.999999"},
+ {+1, +1E9 - 1E6, "1.999"},
+ {+1, +0E0 - 0E0, "1"},
+ {+1, +1E6 - 0E0, "1.001"},
+ {+1, +1E3 - 0E0, "1.000001"},
+ {+1, +1E0 - 0E0, "1.000000001"},
+ {0, 1E9 - 1E0, "0.999999999"},
+ {0, 1E9 - 1E3, "0.999999"},
+ {0, 1E9 - 1E6, "0.999"},
+ {0, 0E0, "0"},
+ {0, 1E6 + 0E0, "0.001"},
+ {0, 1E3 + 0E0, "0.000001"},
+ {0, 1E0 + 0E0, "0.000000001"},
+ {-1, -1E9 + 1E0, "-1.999999999"},
+ {-1, -1E9 + 1E3, "-1.999999"},
+ {-1, -1E9 + 1E6, "-1.999"},
+ {-1, -0E0 + 0E0, "-1"},
+ {-1, -1E6 + 0E0, "-1.001"},
+ {-1, -1E3 + 0E0, "-1.000001"},
+ {-1, -1E0 + 0E0, "-1.000000001"},
+ {-1350244992, 0, "-1350244992"},
+ {-1350244992, -300000000, "-1350244992.3"},
+ {-1350244992, -23960100, "-1350244992.0239601"},
+ {-1350244992, -23960108, "-1350244992.023960108"},
+ }
+
+ for _, v := range vectors {
+ got := formatPAXTime(time.Unix(v.sec, v.nsec))
+ if got != v.want {
+ t.Errorf("formatPAXTime(%ds, %dns): got %q, want %q",
+ v.sec, v.nsec, got, v.want)
+ }
+ }
+}
+
func TestParsePAXRecord(t *testing.T) {
medName := strings.Repeat("CD", 50)
longName := strings.Repeat("AB", 100)
@@ -256,7 +359,7 @@ func TestParsePAXRecord(t *testing.T) {
{"18 foo=b=\nar=\n==\x00\n", "", "foo", "b=\nar=\n==\x00", true},
{"27 foo=hello9 foo=ba\nworld\n", "", "foo", "hello9 foo=ba\nworld", true},
{"27 ☺☻☹=日a本b語ç\nmeow mix", "meow mix", "☺☻☹", "日a本b語ç", true},
- {"17 \x00hello=\x00world\n", "", "\x00hello", "\x00world", true},
+ {"17 \x00hello=\x00world\n", "17 \x00hello=\x00world\n", "", "", false},
{"1 k=1\n", "1 k=1\n", "", "", false},
{"6 k~1\n", "6 k~1\n", "", "", false},
{"6_k=1\n", "6_k=1\n", "", "", false},
@@ -296,21 +399,33 @@ func TestFormatPAXRecord(t *testing.T) {
inKey string
inVal string
want string
+ ok bool
}{
- {"k", "v", "6 k=v\n"},
- {"path", "/etc/hosts", "19 path=/etc/hosts\n"},
- {"path", longName, "210 path=" + longName + "\n"},
- {"path", medName, "110 path=" + medName + "\n"},
- {"foo", "ba", "9 foo=ba\n"},
- {"foo", "bar", "11 foo=bar\n"},
- {"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
- {"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
- {"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
- {"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
+ {"k", "v", "6 k=v\n", true},
+ {"path", "/etc/hosts", "19 path=/etc/hosts\n", true},
+ {"path", longName, "210 path=" + longName + "\n", true},
+ {"path", medName, "110 path=" + medName + "\n", true},
+ {"foo", "ba", "9 foo=ba\n", true},
+ {"foo", "bar", "11 foo=bar\n", true},
+ {"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n", true},
+ {"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n", true},
+ {"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n", true},
+ {"xhello", "\x00world", "17 xhello=\x00world\n", true},
+ {"path", "null\x00", "", false},
+ {"null\x00", "value", "", false},
+ {paxSchilyXattr + "key", "null\x00", "26 SCHILY.xattr.key=null\x00\n", true},
}
for _, v := range vectors {
- got := formatPAXRecord(v.inKey, v.inVal)
+ got, err := formatPAXRecord(v.inKey, v.inVal)
+ ok := (err == nil)
+ if ok != v.ok {
+ if v.ok {
+ t.Errorf("formatPAXRecord(%q, %q): got format failure, want success", v.inKey, v.inVal)
+ } else {
+ t.Errorf("formatPAXRecord(%q, %q): got format success, want failure", v.inKey, v.inVal)
+ }
+ }
if got != v.want {
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
v.inKey, v.inVal, got, v.want)
diff --git a/src/archive/tar/tar_test.go b/src/archive/tar/tar_test.go
index fb7a9dcfc47..af80d6e0c15 100644
--- a/src/archive/tar/tar_test.go
+++ b/src/archive/tar/tar_test.go
@@ -6,8 +6,12 @@ package tar
import (
"bytes"
+ "errors"
+ "fmt"
"internal/testenv"
+ "io"
"io/ioutil"
+ "math"
"os"
"path"
"path/filepath"
@@ -17,6 +21,193 @@ import (
"time"
)
+type testError struct{ error }
+
+type fileOps []interface{} // []T where T is (string | int64)
+
+// testFile is an io.ReadWriteSeeker where the IO operations performed
+// on it must match the list of operations in ops.
+type testFile struct {
+ ops fileOps
+ pos int64
+}
+
+func (f *testFile) Read(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ if len(f.ops) == 0 {
+ return 0, io.EOF
+ }
+ s, ok := f.ops[0].(string)
+ if !ok {
+ return 0, errors.New("unexpected Read operation")
+ }
+
+ n := copy(b, s)
+ if len(s) > n {
+ f.ops[0] = s[n:]
+ } else {
+ f.ops = f.ops[1:]
+ }
+ f.pos += int64(len(b))
+ return n, nil
+}
+
+func (f *testFile) Write(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ if len(f.ops) == 0 {
+ return 0, errors.New("unexpected Write operation")
+ }
+ s, ok := f.ops[0].(string)
+ if !ok {
+ return 0, errors.New("unexpected Write operation")
+ }
+
+ if !strings.HasPrefix(s, string(b)) {
+ return 0, testError{fmt.Errorf("got Write(%q), want Write(%q)", b, s)}
+ }
+ if len(s) > len(b) {
+ f.ops[0] = s[len(b):]
+ } else {
+ f.ops = f.ops[1:]
+ }
+ f.pos += int64(len(b))
+ return len(b), nil
+}
+
+func (f *testFile) Seek(pos int64, whence int) (int64, error) {
+ if pos == 0 && whence == io.SeekCurrent {
+ return f.pos, nil
+ }
+ if len(f.ops) == 0 {
+ return 0, errors.New("unexpected Seek operation")
+ }
+ s, ok := f.ops[0].(int64)
+ if !ok {
+ return 0, errors.New("unexpected Seek operation")
+ }
+
+ if s != pos || whence != io.SeekCurrent {
+ return 0, testError{fmt.Errorf("got Seek(%d, %d), want Seek(%d, %d)", pos, whence, s, io.SeekCurrent)}
+ }
+ f.pos += s
+ f.ops = f.ops[1:]
+ return f.pos, nil
+}
+
+func equalSparseEntries(x, y []sparseEntry) bool {
+ return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
+}
+
+func TestSparseEntries(t *testing.T) {
+ vectors := []struct {
+ in []sparseEntry
+ size int64
+
+ wantValid bool // Result of validateSparseEntries
+ wantAligned []sparseEntry // Result of alignSparseEntries
+ wantInverted []sparseEntry // Result of invertSparseEntries
+ }{{
+ in: []sparseEntry{}, size: 0,
+ wantValid: true,
+ wantInverted: []sparseEntry{{0, 0}},
+ }, {
+ in: []sparseEntry{}, size: 5000,
+ wantValid: true,
+ wantInverted: []sparseEntry{{0, 5000}},
+ }, {
+ in: []sparseEntry{{0, 5000}}, size: 5000,
+ wantValid: true,
+ wantAligned: []sparseEntry{{0, 5000}},
+ wantInverted: []sparseEntry{{5000, 0}},
+ }, {
+ in: []sparseEntry{{1000, 4000}}, size: 5000,
+ wantValid: true,
+ wantAligned: []sparseEntry{{1024, 3976}},
+ wantInverted: []sparseEntry{{0, 1000}, {5000, 0}},
+ }, {
+ in: []sparseEntry{{0, 3000}}, size: 5000,
+ wantValid: true,
+ wantAligned: []sparseEntry{{0, 2560}},
+ wantInverted: []sparseEntry{{3000, 2000}},
+ }, {
+ in: []sparseEntry{{3000, 2000}}, size: 5000,
+ wantValid: true,
+ wantAligned: []sparseEntry{{3072, 1928}},
+ wantInverted: []sparseEntry{{0, 3000}, {5000, 0}},
+ }, {
+ in: []sparseEntry{{2000, 2000}}, size: 5000,
+ wantValid: true,
+ wantAligned: []sparseEntry{{2048, 1536}},
+ wantInverted: []sparseEntry{{0, 2000}, {4000, 1000}},
+ }, {
+ in: []sparseEntry{{0, 2000}, {8000, 2000}}, size: 10000,
+ wantValid: true,
+ wantAligned: []sparseEntry{{0, 1536}, {8192, 1808}},
+ wantInverted: []sparseEntry{{2000, 6000}, {10000, 0}},
+ }, {
+ in: []sparseEntry{{0, 2000}, {2000, 2000}, {4000, 0}, {4000, 3000}, {7000, 1000}, {8000, 0}, {8000, 2000}}, size: 10000,
+ wantValid: true,
+ wantAligned: []sparseEntry{{0, 1536}, {2048, 1536}, {4096, 2560}, {7168, 512}, {8192, 1808}},
+ wantInverted: []sparseEntry{{10000, 0}},
+ }, {
+ in: []sparseEntry{{0, 0}, {1000, 0}, {2000, 0}, {3000, 0}, {4000, 0}, {5000, 0}}, size: 5000,
+ wantValid: true,
+ wantInverted: []sparseEntry{{0, 5000}},
+ }, {
+ in: []sparseEntry{{1, 0}}, size: 0,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{-1, 0}}, size: 100,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{0, -1}}, size: 100,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{0, 0}}, size: -100,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{math.MaxInt64, 3}, {6, -5}}, size: 35,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{1, 3}, {6, -5}}, size: 35,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{math.MaxInt64, math.MaxInt64}}, size: math.MaxInt64,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{3, 3}}, size: 5,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{2, 0}, {1, 0}, {0, 0}}, size: 3,
+ wantValid: false,
+ }, {
+ in: []sparseEntry{{1, 3}, {2, 2}}, size: 10,
+ wantValid: false,
+ }}
+
+ for i, v := range vectors {
+ gotValid := validateSparseEntries(v.in, v.size)
+ if gotValid != v.wantValid {
+ t.Errorf("test %d, validateSparseEntries() = %v, want %v", i, gotValid, v.wantValid)
+ }
+ if !v.wantValid {
+ continue
+ }
+ gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
+ if !equalSparseEntries(gotAligned, v.wantAligned) {
+ t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
+ }
+ gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
+ if !equalSparseEntries(gotInverted, v.wantInverted) {
+ t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
+ }
+ }
+}
+
func TestFileInfoHeader(t *testing.T) {
fi, err := os.Stat("testdata/small.txt")
if err != nil {
@@ -109,15 +300,12 @@ func TestRoundTrip(t *testing.T) {
var b bytes.Buffer
tw := NewWriter(&b)
hdr := &Header{
- Name: "file.txt",
- Uid: 1 << 21, // too big for 8 octal digits
- Size: int64(len(data)),
- // AddDate to strip monotonic clock reading,
- // and Round to discard sub-second precision,
- // both of which are not included in the tar header
- // and would otherwise break the round-trip check
- // below.
- ModTime: time.Now().AddDate(0, 0, 0).Round(1 * time.Second),
+ Name: "file.txt",
+ Uid: 1 << 21, // Too big for 8 octal digits
+ Size: int64(len(data)),
+ ModTime: time.Now().Round(time.Second),
+ PAXRecords: map[string]string{"uid": "2097152"},
+ Format: FormatPAX,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("tw.WriteHeader: %v", err)
@@ -329,3 +517,338 @@ func TestHeaderRoundTrip(t *testing.T) {
}
}
}
+
+func TestHeaderAllowedFormats(t *testing.T) {
+ vectors := []struct {
+ header *Header // Input header
+ paxHdrs map[string]string // Expected PAX headers that may be needed
+ formats Format // Expected formats that can encode the header
+ }{{
+ header: &Header{},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Size: 077777777777},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Size: 077777777777, Format: FormatUSTAR},
+ formats: FormatUSTAR,
+ }, {
+ header: &Header{Size: 077777777777, Format: FormatPAX},
+ formats: FormatUSTAR | FormatPAX,
+ }, {
+ header: &Header{Size: 077777777777, Format: FormatGNU},
+ formats: FormatGNU,
+ }, {
+ header: &Header{Size: 077777777777 + 1},
+ paxHdrs: map[string]string{paxSize: "8589934592"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Size: 077777777777 + 1, Format: FormatPAX},
+ paxHdrs: map[string]string{paxSize: "8589934592"},
+ formats: FormatPAX,
+ }, {
+ header: &Header{Size: 077777777777 + 1, Format: FormatGNU},
+ paxHdrs: map[string]string{paxSize: "8589934592"},
+ formats: FormatGNU,
+ }, {
+ header: &Header{Mode: 07777777},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Mode: 07777777 + 1},
+ formats: FormatGNU,
+ }, {
+ header: &Header{Devmajor: -123},
+ formats: FormatGNU,
+ }, {
+ header: &Header{Devmajor: 1<<56 - 1},
+ formats: FormatGNU,
+ }, {
+ header: &Header{Devmajor: 1 << 56},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{Devmajor: -1 << 56},
+ formats: FormatGNU,
+ }, {
+ header: &Header{Devmajor: -1<<56 - 1},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{Name: "用戶名", Devmajor: -1 << 56},
+ formats: FormatGNU,
+ }, {
+ header: &Header{Size: math.MaxInt64},
+ paxHdrs: map[string]string{paxSize: "9223372036854775807"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Size: math.MinInt64},
+ paxHdrs: map[string]string{paxSize: "-9223372036854775808"},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{Uname: "0123456789abcdef0123456789abcdef"},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Uname: "0123456789abcdef0123456789abcdefx"},
+ paxHdrs: map[string]string{paxUname: "0123456789abcdef0123456789abcdefx"},
+ formats: FormatPAX,
+ }, {
+ header: &Header{Name: "foobar"},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Name: strings.Repeat("a", nameSize)},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Name: strings.Repeat("a", nameSize+1)},
+ paxHdrs: map[string]string{paxPath: strings.Repeat("a", nameSize+1)},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Linkname: "用戶名"},
+ paxHdrs: map[string]string{paxLinkpath: "用戶名"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Linkname: strings.Repeat("用戶名\x00", nameSize)},
+ paxHdrs: map[string]string{paxLinkpath: strings.Repeat("用戶名\x00", nameSize)},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{Linkname: "\x00hello"},
+ paxHdrs: map[string]string{paxLinkpath: "\x00hello"},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{Uid: 07777777},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Uid: 07777777 + 1},
+ paxHdrs: map[string]string{paxUid: "2097152"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Xattrs: nil},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Xattrs: map[string]string{"foo": "bar"}},
+ paxHdrs: map[string]string{paxSchilyXattr + "foo": "bar"},
+ formats: FormatPAX,
+ }, {
+ header: &Header{Xattrs: map[string]string{"foo": "bar"}, Format: FormatGNU},
+ paxHdrs: map[string]string{paxSchilyXattr + "foo": "bar"},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{Xattrs: map[string]string{"用戶名": "\x00hello"}},
+ paxHdrs: map[string]string{paxSchilyXattr + "用戶名": "\x00hello"},
+ formats: FormatPAX,
+ }, {
+ header: &Header{Xattrs: map[string]string{"foo=bar": "baz"}},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{Xattrs: map[string]string{"foo": ""}},
+ paxHdrs: map[string]string{paxSchilyXattr + "foo": ""},
+ formats: FormatPAX,
+ }, {
+ header: &Header{ModTime: time.Unix(0, 0)},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(077777777777, 0)},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(077777777777+1, 0)},
+ paxHdrs: map[string]string{paxMtime: "8589934592"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(math.MaxInt64, 0)},
+ paxHdrs: map[string]string{paxMtime: "9223372036854775807"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(math.MaxInt64, 0), Format: FormatUSTAR},
+ paxHdrs: map[string]string{paxMtime: "9223372036854775807"},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{ModTime: time.Unix(-1, 0)},
+ paxHdrs: map[string]string{paxMtime: "-1"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(1, 500)},
+ paxHdrs: map[string]string{paxMtime: "1.0000005"},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(1, 0)},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(1, 0), Format: FormatPAX},
+ formats: FormatUSTAR | FormatPAX,
+ }, {
+ header: &Header{ModTime: time.Unix(1, 500), Format: FormatUSTAR},
+ paxHdrs: map[string]string{paxMtime: "1.0000005"},
+ formats: FormatUSTAR,
+ }, {
+ header: &Header{ModTime: time.Unix(1, 500), Format: FormatPAX},
+ paxHdrs: map[string]string{paxMtime: "1.0000005"},
+ formats: FormatPAX,
+ }, {
+ header: &Header{ModTime: time.Unix(1, 500), Format: FormatGNU},
+ paxHdrs: map[string]string{paxMtime: "1.0000005"},
+ formats: FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(-1, 500)},
+ paxHdrs: map[string]string{paxMtime: "-0.9999995"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ModTime: time.Unix(-1, 500), Format: FormatGNU},
+ paxHdrs: map[string]string{paxMtime: "-0.9999995"},
+ formats: FormatGNU,
+ }, {
+ header: &Header{AccessTime: time.Unix(0, 0)},
+ paxHdrs: map[string]string{paxAtime: "0"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{AccessTime: time.Unix(0, 0), Format: FormatUSTAR},
+ paxHdrs: map[string]string{paxAtime: "0"},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{AccessTime: time.Unix(0, 0), Format: FormatPAX},
+ paxHdrs: map[string]string{paxAtime: "0"},
+ formats: FormatPAX,
+ }, {
+ header: &Header{AccessTime: time.Unix(0, 0), Format: FormatGNU},
+ paxHdrs: map[string]string{paxAtime: "0"},
+ formats: FormatGNU,
+ }, {
+ header: &Header{AccessTime: time.Unix(-123, 0)},
+ paxHdrs: map[string]string{paxAtime: "-123"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{AccessTime: time.Unix(-123, 0), Format: FormatPAX},
+ paxHdrs: map[string]string{paxAtime: "-123"},
+ formats: FormatPAX,
+ }, {
+ header: &Header{ChangeTime: time.Unix(123, 456)},
+ paxHdrs: map[string]string{paxCtime: "123.000000456"},
+ formats: FormatPAX | FormatGNU,
+ }, {
+ header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatUSTAR},
+ paxHdrs: map[string]string{paxCtime: "123.000000456"},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatGNU},
+ paxHdrs: map[string]string{paxCtime: "123.000000456"},
+ formats: FormatGNU,
+ }, {
+ header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatPAX},
+ paxHdrs: map[string]string{paxCtime: "123.000000456"},
+ formats: FormatPAX,
+ }, {
+ header: &Header{Name: "foo/", Typeflag: TypeDir},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }, {
+ header: &Header{Name: "foo/", Typeflag: TypeReg},
+ formats: FormatUnknown,
+ }, {
+ header: &Header{Name: "foo/", Typeflag: TypeSymlink},
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
+ }}
+
+ for i, v := range vectors {
+ formats, paxHdrs, err := v.header.allowedFormats()
+ if formats != v.formats {
+ t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
+ }
+ if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
+ t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
+ }
+ if (formats != FormatUnknown) && (err != nil) {
+ t.Errorf("test %d, unexpected error: %v", i, err)
+ }
+ if (formats == FormatUnknown) && (err == nil) {
+ t.Errorf("test %d, got nil-error, want non-nil error", i)
+ }
+ }
+}
+
+func Benchmark(b *testing.B) {
+ type file struct {
+ hdr *Header
+ body []byte
+ }
+
+ vectors := []struct {
+ label string
+ files []file
+ }{{
+ "USTAR",
+ []file{{
+ &Header{Name: "bar", Mode: 0640, Size: int64(3)},
+ []byte("foo"),
+ }, {
+ &Header{Name: "world", Mode: 0640, Size: int64(5)},
+ []byte("hello"),
+ }},
+ }, {
+ "GNU",
+ []file{{
+ &Header{Name: "bar", Mode: 0640, Size: int64(3), Devmajor: -1},
+ []byte("foo"),
+ }, {
+ &Header{Name: "world", Mode: 0640, Size: int64(5), Devmajor: -1},
+ []byte("hello"),
+ }},
+ }, {
+ "PAX",
+ []file{{
+ &Header{Name: "bar", Mode: 0640, Size: int64(3), Xattrs: map[string]string{"foo": "bar"}},
+ []byte("foo"),
+ }, {
+ &Header{Name: "world", Mode: 0640, Size: int64(5), Xattrs: map[string]string{"foo": "bar"}},
+ []byte("hello"),
+ }},
+ }}
+
+ b.Run("Writer", func(b *testing.B) {
+ for _, v := range vectors {
+ b.Run(v.label, func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ // Writing to ioutil.Discard because we want to
+ // test purely the writer code and not bring in disk performance into this.
+ tw := NewWriter(ioutil.Discard)
+ for _, file := range v.files {
+ if err := tw.WriteHeader(file.hdr); err != nil {
+ b.Errorf("unexpected WriteHeader error: %v", err)
+ }
+ if _, err := tw.Write(file.body); err != nil {
+ b.Errorf("unexpected Write error: %v", err)
+ }
+ }
+ if err := tw.Close(); err != nil {
+ b.Errorf("unexpected Close error: %v", err)
+ }
+ }
+ })
+ }
+ })
+
+ b.Run("Reader", func(b *testing.B) {
+ for _, v := range vectors {
+ var buf bytes.Buffer
+ var r bytes.Reader
+
+ // Write the archive to a byte buffer.
+ tw := NewWriter(&buf)
+ for _, file := range v.files {
+ tw.WriteHeader(file.hdr)
+ tw.Write(file.body)
+ }
+ tw.Close()
+ b.Run(v.label, func(b *testing.B) {
+ b.ReportAllocs()
+ // Read from the byte buffer.
+ for i := 0; i < b.N; i++ {
+ r.Reset(buf.Bytes())
+ tr := NewReader(&r)
+ if _, err := tr.Next(); err != nil {
+ b.Errorf("unexpected Next error: %v", err)
+ }
+ if _, err := io.Copy(ioutil.Discard, tr); err != nil {
+ b.Errorf("unexpected Copy error : %v", err)
+ }
+ }
+ })
+ }
+ })
+
+}
diff --git a/src/archive/tar/testdata/gnu-long-nul.tar b/src/archive/tar/testdata/gnu-long-nul.tar
new file mode 100644
index 00000000000..28bc812aa60
Binary files /dev/null and b/src/archive/tar/testdata/gnu-long-nul.tar differ
diff --git a/src/archive/tar/testdata/gnu-nil-sparse-data.tar b/src/archive/tar/testdata/gnu-nil-sparse-data.tar
new file mode 100644
index 00000000000..df1aa834538
Binary files /dev/null and b/src/archive/tar/testdata/gnu-nil-sparse-data.tar differ
diff --git a/src/archive/tar/testdata/gnu-nil-sparse-hole.tar b/src/archive/tar/testdata/gnu-nil-sparse-hole.tar
new file mode 100644
index 00000000000..496abfeb78a
Binary files /dev/null and b/src/archive/tar/testdata/gnu-nil-sparse-hole.tar differ
diff --git a/src/archive/tar/testdata/gnu-not-utf8.tar b/src/archive/tar/testdata/gnu-not-utf8.tar
new file mode 100644
index 00000000000..81cec67d330
Binary files /dev/null and b/src/archive/tar/testdata/gnu-not-utf8.tar differ
diff --git a/src/archive/tar/testdata/gnu-sparse-big.tar b/src/archive/tar/testdata/gnu-sparse-big.tar
new file mode 100644
index 00000000000..1a5cfc96d92
Binary files /dev/null and b/src/archive/tar/testdata/gnu-sparse-big.tar differ
diff --git a/src/archive/tar/testdata/gnu-utf8.tar b/src/archive/tar/testdata/gnu-utf8.tar
new file mode 100644
index 00000000000..2c9c8079cf6
Binary files /dev/null and b/src/archive/tar/testdata/gnu-utf8.tar differ
diff --git a/src/archive/tar/testdata/invalid-go17.tar b/src/archive/tar/testdata/invalid-go17.tar
new file mode 100644
index 00000000000..58f2488e78f
Binary files /dev/null and b/src/archive/tar/testdata/invalid-go17.tar differ
diff --git a/src/archive/tar/testdata/pax-global-records.tar b/src/archive/tar/testdata/pax-global-records.tar
new file mode 100644
index 00000000000..3d3d241e65c
Binary files /dev/null and b/src/archive/tar/testdata/pax-global-records.tar differ
diff --git a/src/archive/tar/testdata/pax-nil-sparse-data.tar b/src/archive/tar/testdata/pax-nil-sparse-data.tar
new file mode 100644
index 00000000000..e59bd94117d
Binary files /dev/null and b/src/archive/tar/testdata/pax-nil-sparse-data.tar differ
diff --git a/src/archive/tar/testdata/ustar.issue12594.tar b/src/archive/tar/testdata/pax-nil-sparse-hole.tar
similarity index 70%
rename from src/archive/tar/testdata/ustar.issue12594.tar
rename to src/archive/tar/testdata/pax-nil-sparse-hole.tar
index 50fcd009760..b44327bdbfb 100644
Binary files a/src/archive/tar/testdata/ustar.issue12594.tar and b/src/archive/tar/testdata/pax-nil-sparse-hole.tar differ
diff --git a/src/archive/tar/testdata/pax-nul-path.tar b/src/archive/tar/testdata/pax-nul-path.tar
new file mode 100644
index 00000000000..c78f82b16e8
Binary files /dev/null and b/src/archive/tar/testdata/pax-nul-path.tar differ
diff --git a/src/archive/tar/testdata/pax-nul-xattrs.tar b/src/archive/tar/testdata/pax-nul-xattrs.tar
new file mode 100644
index 00000000000..881f51768f9
Binary files /dev/null and b/src/archive/tar/testdata/pax-nul-xattrs.tar differ
diff --git a/src/archive/tar/testdata/pax-pos-size-file.tar b/src/archive/tar/testdata/pax-pos-size-file.tar
index aed9a8aa48f..ea5ccf91642 100644
Binary files a/src/archive/tar/testdata/pax-pos-size-file.tar and b/src/archive/tar/testdata/pax-pos-size-file.tar differ
diff --git a/src/archive/tar/testdata/pax-records.tar b/src/archive/tar/testdata/pax-records.tar
new file mode 100644
index 00000000000..276c211baa3
Binary files /dev/null and b/src/archive/tar/testdata/pax-records.tar differ
diff --git a/src/archive/tar/testdata/pax-sparse-big.tar b/src/archive/tar/testdata/pax-sparse-big.tar
new file mode 100644
index 00000000000..65d1f8eceb0
Binary files /dev/null and b/src/archive/tar/testdata/pax-sparse-big.tar differ
diff --git a/src/archive/tar/testdata/trailing-slash.tar b/src/archive/tar/testdata/trailing-slash.tar
new file mode 100644
index 00000000000..bf1b2ec426b
Binary files /dev/null and b/src/archive/tar/testdata/trailing-slash.tar differ
diff --git a/src/archive/tar/testdata/ustar-file-devs.tar b/src/archive/tar/testdata/ustar-file-devs.tar
new file mode 100644
index 00000000000..146e25b79d8
Binary files /dev/null and b/src/archive/tar/testdata/ustar-file-devs.tar differ
diff --git a/src/archive/tar/testdata/writer-big-long.tar b/src/archive/tar/testdata/writer-big-long.tar
index ea9bfa88bbb..09fc5dd3dd7 100644
Binary files a/src/archive/tar/testdata/writer-big-long.tar and b/src/archive/tar/testdata/writer-big-long.tar differ
diff --git a/src/archive/tar/testdata/writer-big.tar b/src/archive/tar/testdata/writer-big.tar
index 753e883cebf..435dcbce6ab 100644
Binary files a/src/archive/tar/testdata/writer-big.tar and b/src/archive/tar/testdata/writer-big.tar differ
diff --git a/src/archive/tar/writer.go b/src/archive/tar/writer.go
index c51c243a8b8..97d23f80388 100644
--- a/src/archive/tar/writer.go
+++ b/src/archive/tar/writer.go
@@ -4,255 +4,391 @@
package tar
-// TODO(dsymonds):
-// - catch more errors (no first header, etc.)
-
import (
"bytes"
- "errors"
"fmt"
"io"
"path"
"sort"
- "strconv"
"strings"
"time"
)
-var (
- ErrWriteTooLong = errors.New("archive/tar: write too long")
- ErrFieldTooLong = errors.New("archive/tar: header field too long")
- ErrWriteAfterClose = errors.New("archive/tar: write after close")
- errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
-)
-
-// A Writer provides sequential writing of a tar archive in POSIX.1 format.
-// A tar archive consists of a sequence of files.
-// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
-// writing at most hdr.Size bytes in total.
+// Writer provides sequential writing of a tar archive.
+// Writer.WriteHeader begins a new file with the provided Header,
+// and then Writer can be treated as an io.Writer to supply that file's data.
type Writer struct {
- w io.Writer
- err error
- nb int64 // number of unwritten bytes for current file entry
- pad int64 // amount of padding to write after current file entry
- closed bool
- usedBinary bool // whether the binary numeric field extension was used
- preferPax bool // use PAX header instead of binary numeric header
- hdrBuff block // buffer to use in writeHeader when writing a regular header
- paxHdrBuff block // buffer to use in writeHeader when writing a PAX header
+ w io.Writer
+ pad int64 // Amount of padding to write after current file entry
+ curr fileWriter // Writer for current file entry
+ hdr Header // Shallow copy of Header that is safe for mutations
+ blk block // Buffer to use as temporary local storage
+
+ // err is a persistent error.
+ // It is only the responsibility of every exported method of Writer to
+ // ensure that this error is sticky.
+ err error
}
// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{w: w, curr: &regFileWriter{w, 0}}
+}
-// Flush finishes writing the current file (optional).
+type fileWriter interface {
+ io.Writer
+ fileState
+
+ ReadFrom(io.Reader) (int64, error)
+}
+
+// Flush finishes writing the current file's block padding.
+// The current file must be fully written before Flush can be called.
+//
+// This is unnecessary as the next call to WriteHeader or Close
+// will implicitly flush out the file's padding.
func (tw *Writer) Flush() error {
- if tw.nb > 0 {
- tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
- return tw.err
- }
-
- n := tw.nb + tw.pad
- for n > 0 && tw.err == nil {
- nr := n
- if nr > blockSize {
- nr = blockSize
- }
- var nw int
- nw, tw.err = tw.w.Write(zeroBlock[0:nr])
- n -= int64(nw)
- }
- tw.nb = 0
- tw.pad = 0
- return tw.err
-}
-
-var (
- minTime = time.Unix(0, 0)
- // There is room for 11 octal digits (33 bits) of mtime.
- maxTime = minTime.Add((1<<33 - 1) * time.Second)
-)
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) error {
- return tw.writeHeader(hdr, true)
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-// As this method is called internally by writePax header to allow it to
-// suppress writing the pax header.
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
- if tw.closed {
- return ErrWriteAfterClose
- }
- if tw.err == nil {
- tw.Flush()
- }
if tw.err != nil {
return tw.err
}
-
- // a map to hold pax header records, if any are needed
- paxHeaders := make(map[string]string)
-
- // TODO(dsnet): we might want to use PAX headers for
- // subsecond time resolution, but for now let's just capture
- // too long fields or non ascii characters
-
- // We need to select which scratch buffer to use carefully,
- // since this method is called recursively to write PAX headers.
- // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
- // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
- // already being used by the non-recursive call, so we must use paxHdrBuff.
- header := &tw.hdrBuff
- if !allowPax {
- header = &tw.paxHdrBuff
+ if nb := tw.curr.LogicalRemaining(); nb > 0 {
+ return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
}
- copy(header[:], zeroBlock[:])
-
- // Wrappers around formatter that automatically sets paxHeaders if the
- // argument extends beyond the capacity of the input byte slice.
- var f formatter
- var formatString = func(b []byte, s string, paxKeyword string) {
- needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
- if needsPaxHeader {
- paxHeaders[paxKeyword] = s
- }
-
- // Write string in a best-effort manner to satisfy readers that expect
- // the field to be non-empty.
- s = toASCII(s)
- if len(s) > len(b) {
- s = s[:len(b)]
- }
- f.formatString(b, s) // Should never error
- }
- var formatNumeric = func(b []byte, x int64, paxKeyword string) {
- // Try octal first.
- s := strconv.FormatInt(x, 8)
- if len(s) < len(b) {
- f.formatOctal(b, x)
- return
- }
-
- // If it is too long for octal, and PAX is preferred, use a PAX header.
- if paxKeyword != paxNone && tw.preferPax {
- f.formatOctal(b, 0)
- s := strconv.FormatInt(x, 10)
- paxHeaders[paxKeyword] = s
- return
- }
-
- tw.usedBinary = true
- f.formatNumeric(b, x)
- }
-
- // Handle out of range ModTime carefully.
- var modTime int64
- if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
- modTime = hdr.ModTime.Unix()
- }
-
- v7 := header.V7()
- formatString(v7.Name(), hdr.Name, paxPath)
- // TODO(dsnet): The GNU format permits the mode field to be encoded in
- // base-256 format. Thus, we can use formatNumeric instead of formatOctal.
- f.formatOctal(v7.Mode(), hdr.Mode)
- formatNumeric(v7.UID(), int64(hdr.Uid), paxUid)
- formatNumeric(v7.GID(), int64(hdr.Gid), paxGid)
- formatNumeric(v7.Size(), hdr.Size, paxSize)
- // TODO(dsnet): Consider using PAX for finer time granularity.
- formatNumeric(v7.ModTime(), modTime, paxNone)
- v7.TypeFlag()[0] = hdr.Typeflag
- formatString(v7.LinkName(), hdr.Linkname, paxLinkpath)
-
- ustar := header.USTAR()
- formatString(ustar.UserName(), hdr.Uname, paxUname)
- formatString(ustar.GroupName(), hdr.Gname, paxGname)
- formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone)
- formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone)
-
- // TODO(dsnet): The logic surrounding the prefix field is broken when trying
- // to encode the header as GNU format. The challenge with the current logic
- // is that we are unsure what format we are using at any given moment until
- // we have processed *all* of the fields. The problem is that by the time
- // all fields have been processed, some work has already been done to handle
- // each field under the assumption that it is for one given format or
- // another. In some situations, this causes the Writer to be confused and
- // encode a prefix field when the format being used is GNU. Thus, producing
- // an invalid tar file.
- //
- // As a short-term fix, we disable the logic to use the prefix field, which
- // will force the badly generated GNU files to become encoded as being
- // the PAX format.
- //
- // As an alternative fix, we could hard-code preferPax to be true. However,
- // this is problematic for the following reasons:
- // * The preferPax functionality is not tested at all.
- // * This can result in headers that try to use both the GNU and PAX
- // features at the same time, which is also wrong.
- //
- // The proper fix for this is to use a two-pass method:
- // * The first pass simply determines what set of formats can possibly
- // encode the given header.
- // * The second pass actually encodes the header as that given format
- // without worrying about violating the format.
- //
- // See the following:
- // https://golang.org/issue/12594
- // https://golang.org/issue/17630
- // https://golang.org/issue/9683
- const usePrefix = false
-
- // try to use a ustar header when only the name is too long
- _, paxPathUsed := paxHeaders[paxPath]
- if usePrefix && !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
- prefix, suffix, ok := splitUSTARPath(hdr.Name)
- if ok {
- // Since we can encode in USTAR format, disable PAX header.
- delete(paxHeaders, paxPath)
-
- // Update the path fields
- formatString(v7.Name(), suffix, paxNone)
- formatString(ustar.Prefix(), prefix, paxNone)
- }
- }
-
- if tw.usedBinary {
- header.SetFormat(formatGNU)
- } else {
- header.SetFormat(formatUSTAR)
- }
-
- // Check if there were any formatting errors.
- if f.err != nil {
- tw.err = f.err
+ if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
return tw.err
}
+ tw.pad = 0
+ return nil
+}
- if allowPax {
- for k, v := range hdr.Xattrs {
- paxHeaders[paxXattr+k] = v
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// The Header.Size determines how many bytes can be written for the next file.
+// If the current file is not fully written, then this returns an error.
+// This implicitly flushes any padding necessary before writing the header.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ tw.hdr = *hdr // Shallow copy of Header
+
+ // Round ModTime and ignore AccessTime and ChangeTime unless
+ // the format is explicitly chosen.
+ // This ensures nominal usage of WriteHeader (without specifying the format)
+ // does not always result in the PAX format being chosen, which
+ // causes a 1KiB increase to every header.
+ if tw.hdr.Format == FormatUnknown {
+ tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
+ tw.hdr.AccessTime = time.Time{}
+ tw.hdr.ChangeTime = time.Time{}
+ }
+
+ allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
+ switch {
+ case allowedFormats.has(FormatUSTAR):
+ tw.err = tw.writeUSTARHeader(&tw.hdr)
+ return tw.err
+ case allowedFormats.has(FormatPAX):
+ tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
+ return tw.err
+ case allowedFormats.has(FormatGNU):
+ tw.err = tw.writeGNUHeader(&tw.hdr)
+ return tw.err
+ default:
+ return err // Non-fatal error
+ }
+}
+
+func (tw *Writer) writeUSTARHeader(hdr *Header) error {
+ // Check if we can use USTAR prefix/suffix splitting.
+ var namePrefix string
+ if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
+ namePrefix, hdr.Name = prefix, suffix
+ }
+
+ // Pack the main header.
+ var f formatter
+ blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
+ f.formatString(blk.USTAR().Prefix(), namePrefix)
+ blk.SetFormat(FormatUSTAR)
+ if f.err != nil {
+ return f.err // Should never happen since header is validated
+ }
+ return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
+}
+
+func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
+ realName, realSize := hdr.Name, hdr.Size
+
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ // Handle sparse files.
+ var spd sparseDatas
+ var spb []byte
+ if len(hdr.SparseHoles) > 0 {
+ sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
+ sph = alignSparseEntries(sph, hdr.Size)
+ spd = invertSparseEntries(sph, hdr.Size)
+
+ // Format the sparse map.
+ hdr.Size = 0 // Replace with encoded size
+ spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
+ for _, s := range spd {
+ hdr.Size += s.Length
+ spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
+ spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
+ }
+ pad := blockPadding(int64(len(spb)))
+ spb = append(spb, zeroBlock[:pad]...)
+ hdr.Size += int64(len(spb)) // Accounts for encoded sparse map
+
+ // Add and modify appropriate PAX records.
+ dir, file := path.Split(realName)
+ hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
+ paxHdrs[paxGNUSparseMajor] = "1"
+ paxHdrs[paxGNUSparseMinor] = "0"
+ paxHdrs[paxGNUSparseName] = realName
+ paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
+ paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
+ delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
+ }
+ */
+ _ = realSize
+
+ // Write PAX records to the output.
+ isGlobal := hdr.Typeflag == TypeXGlobalHeader
+ if len(paxHdrs) > 0 || isGlobal {
+ // Sort keys for deterministic ordering.
+ var keys []string
+ for k := range paxHdrs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ // Write each record to a buffer.
+ var buf bytes.Buffer
+ for _, k := range keys {
+ rec, err := formatPAXRecord(k, paxHdrs[k])
+ if err != nil {
+ return err
+ }
+ buf.WriteString(rec)
+ }
+
+ // Write the extended header file.
+ var name string
+ var flag byte
+ if isGlobal {
+ name = realName
+ if name == "" {
+ name = "GlobalHead.0.0"
+ }
+ flag = TypeXGlobalHeader
+ } else {
+ dir, file := path.Split(realName)
+ name = path.Join(dir, "PaxHeaders.0", file)
+ flag = TypeXHeader
+ }
+ data := buf.String()
+ if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
+ return err // Global headers return here
}
}
- if len(paxHeaders) > 0 {
- if !allowPax {
- return errInvalidHeader
+ // Pack the main header.
+ var f formatter // Ignore errors since they are expected
+ fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
+ blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
+ blk.SetFormat(FormatPAX)
+ if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
+ return err
+ }
+
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ // Write the sparse map and setup the sparse writer if necessary.
+ if len(spd) > 0 {
+ // Use tw.curr since the sparse map is accounted for in hdr.Size.
+ if _, err := tw.curr.Write(spb); err != nil {
+ return err
+ }
+ tw.curr = &sparseFileWriter{tw.curr, spd, 0}
}
- if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
+ */
+ return nil
+}
+
+func (tw *Writer) writeGNUHeader(hdr *Header) error {
+ // Use long-link files if Name or Linkname exceeds the field size.
+ const longName = "././@LongLink"
+ if len(hdr.Name) > nameSize {
+ data := hdr.Name + "\x00"
+ if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
+ return err
+ }
+ }
+ if len(hdr.Linkname) > nameSize {
+ data := hdr.Linkname + "\x00"
+ if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
return err
}
}
- tw.nb = hdr.Size
- tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
- _, tw.err = tw.w.Write(header[:])
- return tw.err
+ // Pack the main header.
+ var f formatter // Ignore errors since they are expected
+ var spd sparseDatas
+ var spb []byte
+ blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
+ if !hdr.AccessTime.IsZero() {
+ f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix())
+ }
+ if !hdr.ChangeTime.IsZero() {
+ f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
+ }
+ // TODO(dsnet): Re-enable this when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ if hdr.Typeflag == TypeGNUSparse {
+ sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
+ sph = alignSparseEntries(sph, hdr.Size)
+ spd = invertSparseEntries(sph, hdr.Size)
+
+ // Format the sparse map.
+ formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
+ for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
+ f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
+ f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
+ sp = sp[1:]
+ }
+ if len(sp) > 0 {
+ sa.IsExtended()[0] = 1
+ }
+ return sp
+ }
+ sp2 := formatSPD(spd, blk.GNU().Sparse())
+ for len(sp2) > 0 {
+ var spHdr block
+ sp2 = formatSPD(sp2, spHdr.Sparse())
+ spb = append(spb, spHdr[:]...)
+ }
+
+ // Update size fields in the header block.
+ realSize := hdr.Size
+ hdr.Size = 0 // Encoded size; does not account for encoded sparse map
+ for _, s := range spd {
+ hdr.Size += s.Length
+ }
+ copy(blk.V7().Size(), zeroBlock[:]) // Reset field
+ f.formatNumeric(blk.V7().Size(), hdr.Size)
+ f.formatNumeric(blk.GNU().RealSize(), realSize)
+ }
+ */
+ blk.SetFormat(FormatGNU)
+ if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
+ return err
+ }
+
+ // Write the extended sparse map and setup the sparse writer if necessary.
+ if len(spd) > 0 {
+ // Use tw.w since the sparse map is not accounted for in hdr.Size.
+ if _, err := tw.w.Write(spb); err != nil {
+ return err
+ }
+ tw.curr = &sparseFileWriter{tw.curr, spd, 0}
+ }
+ return nil
+}
+
+type (
+ stringFormatter func([]byte, string)
+ numberFormatter func([]byte, int64)
+)
+
+// templateV7Plus fills out the V7 fields of a block using values from hdr.
+// It also fills out fields (uname, gname, devmajor, devminor) that are
+// shared in the USTAR, PAX, and GNU formats using the provided formatters.
+//
+// The block returned is only valid until the next call to
+// templateV7Plus or writeRawFile.
+func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
+ tw.blk.Reset()
+
+ modTime := hdr.ModTime
+ if modTime.IsZero() {
+ modTime = time.Unix(0, 0)
+ }
+
+ v7 := tw.blk.V7()
+ v7.TypeFlag()[0] = hdr.Typeflag
+ fmtStr(v7.Name(), hdr.Name)
+ fmtStr(v7.LinkName(), hdr.Linkname)
+ fmtNum(v7.Mode(), hdr.Mode)
+ fmtNum(v7.UID(), int64(hdr.Uid))
+ fmtNum(v7.GID(), int64(hdr.Gid))
+ fmtNum(v7.Size(), hdr.Size)
+ fmtNum(v7.ModTime(), modTime.Unix())
+
+ ustar := tw.blk.USTAR()
+ fmtStr(ustar.UserName(), hdr.Uname)
+ fmtStr(ustar.GroupName(), hdr.Gname)
+ fmtNum(ustar.DevMajor(), hdr.Devmajor)
+ fmtNum(ustar.DevMinor(), hdr.Devminor)
+
+ return &tw.blk
+}
+
+// writeRawFile writes a minimal file with the given name and flag type.
+// It uses format to encode the header format and will write data as the body.
+// It uses default values for all of the other fields (as BSD and GNU tar does).
+func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
+ tw.blk.Reset()
+
+ // Best effort for the filename.
+ name = toASCII(name)
+ if len(name) > nameSize {
+ name = name[:nameSize]
+ }
+ name = strings.TrimRight(name, "/")
+
+ var f formatter
+ v7 := tw.blk.V7()
+ v7.TypeFlag()[0] = flag
+ f.formatString(v7.Name(), name)
+ f.formatOctal(v7.Mode(), 0)
+ f.formatOctal(v7.UID(), 0)
+ f.formatOctal(v7.GID(), 0)
+ f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
+ f.formatOctal(v7.ModTime(), 0)
+ tw.blk.SetFormat(format)
+ if f.err != nil {
+ return f.err // Only occurs if size condition is violated
+ }
+
+ // Write the header and data.
+ if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
+ return err
+ }
+ _, err := io.WriteString(tw, data)
+ return err
+}
+
+// writeRawHeader writes the value of blk, regardless of its value.
+// It sets up the Writer such that it can accept a file of the given size.
+// If the flag is a special header-only flag, then the size is treated as zero.
+func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ if _, err := tw.w.Write(blk[:]); err != nil {
+ return err
+ }
+ if isHeaderOnlyType(flag) {
+ size = 0
+ }
+ tw.curr = &regFileWriter{tw.w, size}
+ tw.pad = blockPadding(size)
+ return nil
}
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
@@ -276,95 +412,233 @@ func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
return name[:i], name[i+1:], true
}
-// writePaxHeader writes an extended pax header to the
-// archive.
-func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
- // Prepare extended header
- ext := new(Header)
- ext.Typeflag = TypeXHeader
- // Setting ModTime is required for reader parsing to
- // succeed, and seems harmless enough.
- ext.ModTime = hdr.ModTime
- // The spec asks that we namespace our pseudo files
- // with the current pid. However, this results in differing outputs
- // for identical inputs. As such, the constant 0 is now used instead.
- // golang.org/issue/12358
- dir, file := path.Split(hdr.Name)
- fullName := path.Join(dir, "PaxHeaders.0", file)
-
- ascii := toASCII(fullName)
- if len(ascii) > nameSize {
- ascii = ascii[:nameSize]
- }
- ext.Name = ascii
- // Construct the body
- var buf bytes.Buffer
-
- // Keys are sorted before writing to body to allow deterministic output.
- keys := make([]string, 0, len(paxHeaders))
- for k := range paxHeaders {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- for _, k := range keys {
- fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
- }
-
- ext.Size = int64(len(buf.Bytes()))
- if err := tw.writeHeader(ext, false); err != nil {
- return err
- }
- if _, err := tw.Write(buf.Bytes()); err != nil {
- return err
- }
- if err := tw.Flush(); err != nil {
- return err
- }
- return nil
-}
-
-// Write writes to the current entry in the tar archive.
+// Write writes to the current file in the tar archive.
// Write returns the error ErrWriteTooLong if more than
-// hdr.Size bytes are written after WriteHeader.
-func (tw *Writer) Write(b []byte) (n int, err error) {
- if tw.closed {
- err = ErrWriteAfterClose
- return
+// Header.Size bytes are written after WriteHeader.
+//
+// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
+// of what the Header.Size claims.
+func (tw *Writer) Write(b []byte) (int, error) {
+ if tw.err != nil {
+ return 0, tw.err
}
- overwrite := false
- if int64(len(b)) > tw.nb {
- b = b[0:tw.nb]
- overwrite = true
+ n, err := tw.curr.Write(b)
+ if err != nil && err != ErrWriteTooLong {
+ tw.err = err
}
- n, err = tw.w.Write(b)
- tw.nb -= int64(n)
- if err == nil && overwrite {
- err = ErrWriteTooLong
- return
- }
- tw.err = err
- return
+ return n, err
}
-// Close closes the tar archive, flushing any unwritten
-// data to the underlying writer.
-func (tw *Writer) Close() error {
- if tw.err != nil || tw.closed {
- return tw.err
+// readFrom populates the content of the current file by reading from r.
+// The bytes read must match the number of remaining bytes in the current file.
+//
+// If the current file is sparse and r is an io.ReadSeeker,
+// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
+// assuming that skipped regions are all NULs.
+// This always reads the last byte to ensure r is the right size.
+//
+// TODO(dsnet): Re-export this when adding sparse file support.
+// See https://golang.org/issue/22735
+func (tw *Writer) readFrom(r io.Reader) (int64, error) {
+ if tw.err != nil {
+ return 0, tw.err
+ }
+ n, err := tw.curr.ReadFrom(r)
+ if err != nil && err != ErrWriteTooLong {
+ tw.err = err
+ }
+ return n, err
+}
+
+// Close closes the tar archive by flushing the padding, and writing the footer.
+// If the current file (from a prior call to WriteHeader) is not fully written,
+// then this returns an error.
+func (tw *Writer) Close() error {
+ if tw.err == ErrWriteAfterClose {
+ return nil
}
- tw.Flush()
- tw.closed = true
if tw.err != nil {
return tw.err
}
- // trailer: two zero blocks
- for i := 0; i < 2; i++ {
- _, tw.err = tw.w.Write(zeroBlock[:])
- if tw.err != nil {
- break
+ // Trailer: two zero blocks.
+ err := tw.Flush()
+ for i := 0; i < 2 && err == nil; i++ {
+ _, err = tw.w.Write(zeroBlock[:])
+ }
+
+ // Ensure all future actions are invalid.
+ tw.err = ErrWriteAfterClose
+ return err // Report IO errors
+}
+
+// regFileWriter is a fileWriter for writing data to a regular file entry.
+type regFileWriter struct {
+ w io.Writer // Underlying Writer
+ nb int64 // Number of remaining bytes to write
+}
+
+func (fw *regFileWriter) Write(b []byte) (n int, err error) {
+ overwrite := int64(len(b)) > fw.nb
+ if overwrite {
+ b = b[:fw.nb]
+ }
+ if len(b) > 0 {
+ n, err = fw.w.Write(b)
+ fw.nb -= int64(n)
+ }
+ switch {
+ case err != nil:
+ return n, err
+ case overwrite:
+ return n, ErrWriteTooLong
+ default:
+ return n, nil
+ }
+}
+
+func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
+ return io.Copy(struct{ io.Writer }{fw}, r)
+}
+
+func (fw regFileWriter) LogicalRemaining() int64 {
+ return fw.nb
+}
+func (fw regFileWriter) PhysicalRemaining() int64 {
+ return fw.nb
+}
+
+// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
+type sparseFileWriter struct {
+ fw fileWriter // Underlying fileWriter
+ sp sparseDatas // Normalized list of data fragments
+ pos int64 // Current position in sparse file
+}
+
+func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
+ overwrite := int64(len(b)) > sw.LogicalRemaining()
+ if overwrite {
+ b = b[:sw.LogicalRemaining()]
+ }
+
+ b0 := b
+ endPos := sw.pos + int64(len(b))
+ for endPos > sw.pos && err == nil {
+ var nf int // Bytes written in fragment
+ dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
+ if sw.pos < dataStart { // In a hole fragment
+ bf := b[:min(int64(len(b)), dataStart-sw.pos)]
+ nf, err = zeroWriter{}.Write(bf)
+ } else { // In a data fragment
+ bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
+ nf, err = sw.fw.Write(bf)
+ }
+ b = b[nf:]
+ sw.pos += int64(nf)
+ if sw.pos >= dataEnd && len(sw.sp) > 1 {
+ sw.sp = sw.sp[1:] // Ensure last fragment always remains
}
}
- return tw.err
+
+ n = len(b0) - len(b)
+ switch {
+ case err == ErrWriteTooLong:
+ return n, errMissData // Not possible; implies bug in validation logic
+ case err != nil:
+ return n, err
+ case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
+ return n, errUnrefData // Not possible; implies bug in validation logic
+ case overwrite:
+ return n, ErrWriteTooLong
+ default:
+ return n, nil
+ }
+}
+
+func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
+ rs, ok := r.(io.ReadSeeker)
+ if ok {
+ if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
+ ok = false // Not all io.Seeker can really seek
+ }
+ }
+ if !ok {
+ return io.Copy(struct{ io.Writer }{sw}, r)
+ }
+
+ var readLastByte bool
+ pos0 := sw.pos
+ for sw.LogicalRemaining() > 0 && !readLastByte && err == nil {
+ var nf int64 // Size of fragment
+ dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
+ if sw.pos < dataStart { // In a hole fragment
+ nf = dataStart - sw.pos
+ if sw.PhysicalRemaining() == 0 {
+ readLastByte = true
+ nf--
+ }
+ _, err = rs.Seek(nf, io.SeekCurrent)
+ } else { // In a data fragment
+ nf = dataEnd - sw.pos
+ nf, err = io.CopyN(sw.fw, rs, nf)
+ }
+ sw.pos += nf
+ if sw.pos >= dataEnd && len(sw.sp) > 1 {
+ sw.sp = sw.sp[1:] // Ensure last fragment always remains
+ }
+ }
+
+ // If the last fragment is a hole, then seek to 1-byte before EOF, and
+ // read a single byte to ensure the file is the right size.
+ if readLastByte && err == nil {
+ _, err = mustReadFull(rs, []byte{0})
+ sw.pos++
+ }
+
+ n = sw.pos - pos0
+ switch {
+ case err == io.EOF:
+ return n, io.ErrUnexpectedEOF
+ case err == ErrWriteTooLong:
+ return n, errMissData // Not possible; implies bug in validation logic
+ case err != nil:
+ return n, err
+ case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
+ return n, errUnrefData // Not possible; implies bug in validation logic
+ default:
+ return n, ensureEOF(rs)
+ }
+}
+
+func (sw sparseFileWriter) LogicalRemaining() int64 {
+ return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
+}
+func (sw sparseFileWriter) PhysicalRemaining() int64 {
+ return sw.fw.PhysicalRemaining()
+}
+
+// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
+type zeroWriter struct{}
+
+func (zeroWriter) Write(b []byte) (int, error) {
+ for i, c := range b {
+ if c != 0 {
+ return i, errWriteHole
+ }
+ }
+ return len(b), nil
+}
+
+// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
+func ensureEOF(r io.Reader) error {
+ n, err := tryReadFull(r, []byte{0})
+ switch {
+ case n > 0:
+ return ErrWriteTooLong
+ case err == io.EOF:
+ return nil
+ default:
+ return err
+ }
}
diff --git a/src/archive/tar/writer_test.go b/src/archive/tar/writer_test.go
index d88b8f41ca8..24e8da271c2 100644
--- a/src/archive/tar/writer_test.go
+++ b/src/archive/tar/writer_test.go
@@ -6,10 +6,12 @@ package tar
import (
"bytes"
- "fmt"
+ "encoding/hex"
+ "errors"
"io"
"io/ioutil"
"os"
+ "path"
"reflect"
"sort"
"strings"
@@ -18,120 +20,127 @@ import (
"time"
)
-// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
-func bytestr(offset int, b []byte) string {
- const rowLen = 32
- s := fmt.Sprintf("%04x ", offset)
- for _, ch := range b {
- switch {
- case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
- s += fmt.Sprintf(" %c", ch)
- default:
- s += fmt.Sprintf(" %02x", ch)
+func bytediff(a, b []byte) string {
+ const (
+ uniqueA = "- "
+ uniqueB = "+ "
+ identity = " "
+ )
+ var ss []string
+ sa := strings.Split(strings.TrimSpace(hex.Dump(a)), "\n")
+ sb := strings.Split(strings.TrimSpace(hex.Dump(b)), "\n")
+ for len(sa) > 0 && len(sb) > 0 {
+ if sa[0] == sb[0] {
+ ss = append(ss, identity+sa[0])
+ } else {
+ ss = append(ss, uniqueA+sa[0])
+ ss = append(ss, uniqueB+sb[0])
}
+ sa, sb = sa[1:], sb[1:]
}
- return s
-}
-
-// Render a pseudo-diff between two blocks of bytes.
-func bytediff(a []byte, b []byte) string {
- const rowLen = 32
- s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
- for offset := 0; len(a)+len(b) > 0; offset += rowLen {
- na, nb := rowLen, rowLen
- if na > len(a) {
- na = len(a)
- }
- if nb > len(b) {
- nb = len(b)
- }
- sa := bytestr(offset, a[0:na])
- sb := bytestr(offset, b[0:nb])
- if sa != sb {
- s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
- }
- a = a[na:]
- b = b[nb:]
+ for len(sa) > 0 {
+ ss = append(ss, uniqueA+sa[0])
+ sa = sa[1:]
}
- return s
+ for len(sb) > 0 {
+ ss = append(ss, uniqueB+sb[0])
+ sb = sb[1:]
+ }
+ return strings.Join(ss, "\n")
}
func TestWriter(t *testing.T) {
- type entry struct {
- header *Header
- contents string
- }
+ type (
+ testHeader struct { // WriteHeader(hdr) == wantErr
+ hdr Header
+ wantErr error
+ }
+ testWrite struct { // Write(str) == (wantCnt, wantErr)
+ str string
+ wantCnt int
+ wantErr error
+ }
+ testReadFrom struct { // ReadFrom(testFile{ops}) == (wantCnt, wantErr)
+ ops fileOps
+ wantCnt int64
+ wantErr error
+ }
+ testClose struct { // Close() == wantErr
+ wantErr error
+ }
+ testFnc interface{} // testHeader | testWrite | testReadFrom | testClose
+ )
vectors := []struct {
- file string // filename of expected output
- entries []*entry
+ file string // Optional filename of expected output
+ tests []testFnc
}{{
// The writer test file was produced with this command:
// tar (GNU tar) 1.26
// ln -s small.txt link.txt
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
file: "testdata/writer.tar",
- entries: []*entry{{
- header: &Header{
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
Name: "small.txt",
- Mode: 0640,
- Uid: 73025,
- Gid: 5000,
Size: 5,
- ModTime: time.Unix(1246508266, 0),
- Typeflag: '0',
- Uname: "dsymonds",
- Gname: "eng",
- },
- contents: "Kilts",
- }, {
- header: &Header{
- Name: "small2.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
- Size: 11,
- ModTime: time.Unix(1245217492, 0),
- Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
- },
- contents: "Google.com\n",
- }, {
- header: &Header{
+ ModTime: time.Unix(1246508266, 0),
+ }, nil},
+ testWrite{"Kilts", 5, nil},
+
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "small2.txt",
+ Size: 11,
+ Mode: 0640,
+ Uid: 73025,
+ Uname: "dsymonds",
+ Gname: "eng",
+ Gid: 5000,
+ ModTime: time.Unix(1245217492, 0),
+ }, nil},
+ testWrite{"Google.com\n", 11, nil},
+
+ testHeader{Header{
+ Typeflag: TypeSymlink,
Name: "link.txt",
+ Linkname: "small.txt",
Mode: 0777,
Uid: 1000,
Gid: 1000,
- Size: 0,
- ModTime: time.Unix(1314603082, 0),
- Typeflag: '2',
- Linkname: "small.txt",
Uname: "strings",
Gname: "strings",
- },
- // no contents
- }},
+ ModTime: time.Unix(1314603082, 0),
+ }, nil},
+ testWrite{"", 0, nil},
+
+ testClose{nil},
+ },
}, {
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
file: "testdata/writer-big.tar",
- entries: []*entry{{
- header: &Header{
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
Name: "tmp/16gig.txt",
+ Size: 16 << 30,
Mode: 0640,
Uid: 73025,
Gid: 5000,
- Size: 16 << 30,
- ModTime: time.Unix(1254699560, 0),
- Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
- },
- // fake contents
- contents: strings.Repeat("\x00", 4<<10),
- }},
+ ModTime: time.Unix(1254699560, 0),
+ Format: FormatGNU,
+ }, nil},
+ },
}, {
// This truncated file was produced using this library.
// It was verified to work with GNU tar 1.27.1 and BSD tar 3.1.2.
@@ -141,117 +150,377 @@ func TestWriter(t *testing.T) {
//
// This file is in PAX format.
file: "testdata/writer-big-long.tar",
- entries: []*entry{{
- header: &Header{
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
Name: strings.Repeat("longname/", 15) + "16gig.txt",
+ Size: 16 << 30,
Mode: 0644,
Uid: 1000,
Gid: 1000,
- Size: 16 << 30,
- ModTime: time.Unix(1399583047, 0),
- Typeflag: '0',
Uname: "guillaume",
Gname: "guillaume",
- },
- // fake contents
- contents: strings.Repeat("\x00", 4<<10),
- }},
+ ModTime: time.Unix(1399583047, 0),
+ }, nil},
+ },
}, {
- // TODO(dsnet): The Writer output should match the following file.
- // To fix an issue (see https://golang.org/issue/12594), we disabled
- // prefix support, which alters the generated output.
- /*
- // This file was produced using gnu tar 1.17
- // gnutar -b 4 --format=ustar (longname/)*15 + file.txt
- file: "testdata/ustar.tar"
- */
- file: "testdata/ustar.issue12594.tar", // This is a valid tar file, but not expected
- entries: []*entry{{
- header: &Header{
+ // This file was produced using GNU tar v1.17.
+ // gnutar -b 4 --format=ustar (longname/)*15 + file.txt
+ file: "testdata/ustar.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
Name: strings.Repeat("longname/", 15) + "file.txt",
+ Size: 6,
Mode: 0644,
- Uid: 0765,
- Gid: 024,
- Size: 06,
- ModTime: time.Unix(1360135598, 0),
- Typeflag: '0',
+ Uid: 501,
+ Gid: 20,
Uname: "shane",
Gname: "staff",
- },
- contents: "hello\n",
- }},
+ ModTime: time.Unix(1360135598, 0),
+ }, nil},
+ testWrite{"hello\n", 6, nil},
+ testClose{nil},
+ },
}, {
- // This file was produced using gnu tar 1.26
- // echo "Slartibartfast" > file.txt
- // ln file.txt hard.txt
- // tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
+ // This file was produced using GNU tar v1.26:
+ // echo "Slartibartfast" > file.txt
+ // ln file.txt hard.txt
+ // tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
file: "testdata/hardlink.tar",
- entries: []*entry{{
- header: &Header{
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
Name: "file.txt",
- Mode: 0644,
- Uid: 1000,
- Gid: 100,
Size: 15,
- ModTime: time.Unix(1425484303, 0),
- Typeflag: '0',
- Uname: "vbatts",
- Gname: "users",
- },
- contents: "Slartibartfast\n",
- }, {
- header: &Header{
- Name: "hard.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
- Size: 0,
- ModTime: time.Unix(1425484303, 0),
- Typeflag: '1',
- Linkname: "file.txt",
Uname: "vbatts",
Gname: "users",
- },
- // no contents
- }},
+ ModTime: time.Unix(1425484303, 0),
+ }, nil},
+ testWrite{"Slartibartfast\n", 15, nil},
+
+ testHeader{Header{
+ Typeflag: TypeLink,
+ Name: "hard.txt",
+ Linkname: "file.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 100,
+ Uname: "vbatts",
+ Gname: "users",
+ ModTime: time.Unix(1425484303, 0),
+ }, nil},
+ testWrite{"", 0, nil},
+
+ testClose{nil},
+ },
+ }, {
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "bad-null.txt",
+ Xattrs: map[string]string{"null\x00null\x00": "fizzbuzz"},
+ }, headerError{}},
+ },
+ }, {
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "null\x00.txt",
+ }, headerError{}},
+ },
+ }, {
+ file: "testdata/pax-records.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "file",
+ Uname: strings.Repeat("long", 10),
+ PAXRecords: map[string]string{
+ "path": "FILE", // Should be ignored
+ "GNU.sparse.map": "0,0", // Should be ignored
+ "comment": "Hello, 世界",
+ "GOLANG.pkg": "tar",
+ },
+ }, nil},
+ testClose{nil},
+ },
+ }, {
+ // Craft a theoretically valid PAX archive with global headers.
+ // The GNU and BSD tar tools do not parse these the same way.
+ //
+ // BSD tar v3.1.2 parses and ignores all global headers;
+ // the behavior is verified by researching the source code.
+ //
+ // $ bsdtar -tvf pax-global-records.tar
+ // ---------- 0 0 0 0 Dec 31 1969 file1
+ // ---------- 0 0 0 0 Dec 31 1969 file2
+ // ---------- 0 0 0 0 Dec 31 1969 file3
+ // ---------- 0 0 0 0 May 13 2014 file4
+ //
+ // GNU tar v1.27.1 applies global headers to subsequent records,
+ // but does not do the following properly:
+ // * It does not treat an empty record as deletion.
+ // * It does not use subsequent global headers to update previous ones.
+ //
+ // $ gnutar -tvf pax-global-records.tar
+ // ---------- 0/0 0 2017-07-13 19:40 global1
+ // ---------- 0/0 0 2017-07-13 19:40 file2
+ // gnutar: Substituting `.' for empty member name
+ // ---------- 0/0 0 1969-12-31 16:00
+ // gnutar: Substituting `.' for empty member name
+ // ---------- 0/0 0 2014-05-13 09:53
+ //
+ // According to the PAX specification, this should have been the result:
+ // ---------- 0/0 0 2017-07-13 19:40 global1
+ // ---------- 0/0 0 2017-07-13 19:40 file2
+ // ---------- 0/0 0 2017-07-13 19:40 file3
+ // ---------- 0/0 0 2014-05-13 09:53 file4
+ file: "testdata/pax-global-records.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeXGlobalHeader,
+ PAXRecords: map[string]string{"path": "global1", "mtime": "1500000000.0"},
+ }, nil},
+ testHeader{Header{
+ Typeflag: TypeReg, Name: "file1",
+ }, nil},
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "file2",
+ PAXRecords: map[string]string{"path": "file2"},
+ }, nil},
+ testHeader{Header{
+ Typeflag: TypeXGlobalHeader,
+ PAXRecords: map[string]string{"path": ""}, // Should delete "path", but keep "mtime"
+ }, nil},
+ testHeader{Header{
+ Typeflag: TypeReg, Name: "file3",
+ }, nil},
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "file4",
+ ModTime: time.Unix(1400000000, 0),
+ PAXRecords: map[string]string{"mtime": "1400000000"},
+ }, nil},
+ testClose{nil},
+ },
+ }, {
+ file: "testdata/gnu-utf8.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹",
+ Mode: 0644,
+ Uid: 1000, Gid: 1000,
+ Uname: "☺",
+ Gname: "⚹",
+ ModTime: time.Unix(0, 0),
+ Format: FormatGNU,
+ }, nil},
+ testClose{nil},
+ },
+ }, {
+ file: "testdata/gnu-not-utf8.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "hi\x80\x81\x82\x83bye",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "rawr",
+ Gname: "dsnet",
+ ModTime: time.Unix(0, 0),
+ Format: FormatGNU,
+ }, nil},
+ testClose{nil},
+ },
+ // TODO(dsnet): Re-enable this test when adding sparse support.
+ // See https://golang.org/issue/22735
+ /*
+ }, {
+ file: "testdata/gnu-nil-sparse-data.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeGNUSparse,
+ Name: "sparse.db",
+ Size: 1000,
+ SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}},
+ }, nil},
+ testWrite{strings.Repeat("0123456789", 100), 1000, nil},
+ testClose{},
+ },
+ }, {
+ file: "testdata/gnu-nil-sparse-hole.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeGNUSparse,
+ Name: "sparse.db",
+ Size: 1000,
+ SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}},
+ }, nil},
+ testWrite{strings.Repeat("\x00", 1000), 1000, nil},
+ testClose{},
+ },
+ }, {
+ file: "testdata/pax-nil-sparse-data.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "sparse.db",
+ Size: 1000,
+ SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}},
+ }, nil},
+ testWrite{strings.Repeat("0123456789", 100), 1000, nil},
+ testClose{},
+ },
+ }, {
+ file: "testdata/pax-nil-sparse-hole.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "sparse.db",
+ Size: 1000,
+ SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}},
+ }, nil},
+ testWrite{strings.Repeat("\x00", 1000), 1000, nil},
+ testClose{},
+ },
+ }, {
+ file: "testdata/gnu-sparse-big.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeGNUSparse,
+ Name: "gnu-sparse",
+ Size: 6e10,
+ SparseHoles: []sparseEntry{
+ {Offset: 0e10, Length: 1e10 - 100},
+ {Offset: 1e10, Length: 1e10 - 100},
+ {Offset: 2e10, Length: 1e10 - 100},
+ {Offset: 3e10, Length: 1e10 - 100},
+ {Offset: 4e10, Length: 1e10 - 100},
+ {Offset: 5e10, Length: 1e10 - 100},
+ },
+ }, nil},
+ testReadFrom{fileOps{
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ }, 6e10, nil},
+ testClose{nil},
+ },
+ }, {
+ file: "testdata/pax-sparse-big.tar",
+ tests: []testFnc{
+ testHeader{Header{
+ Typeflag: TypeReg,
+ Name: "pax-sparse",
+ Size: 6e10,
+ SparseHoles: []sparseEntry{
+ {Offset: 0e10, Length: 1e10 - 100},
+ {Offset: 1e10, Length: 1e10 - 100},
+ {Offset: 2e10, Length: 1e10 - 100},
+ {Offset: 3e10, Length: 1e10 - 100},
+ {Offset: 4e10, Length: 1e10 - 100},
+ {Offset: 5e10, Length: 1e10 - 100},
+ },
+ }, nil},
+ testReadFrom{fileOps{
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ int64(1e10 - blockSize),
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
+ }, 6e10, nil},
+ testClose{nil},
+ },
+ */
+ }, {
+ file: "testdata/trailing-slash.tar",
+ tests: []testFnc{
+ testHeader{Header{Name: strings.Repeat("123456789/", 30)}, nil},
+ testClose{nil},
+ },
}}
-testLoop:
- for i, v := range vectors {
- expected, err := ioutil.ReadFile(v.file)
- if err != nil {
- t.Errorf("test %d: Unexpected error: %v", i, err)
- continue
+ equalError := func(x, y error) bool {
+ _, ok1 := x.(headerError)
+ _, ok2 := y.(headerError)
+ if ok1 || ok2 {
+ return ok1 && ok2
}
+ return x == y
+ }
+ for _, v := range vectors {
+ t.Run(path.Base(v.file), func(t *testing.T) {
+ const maxSize = 10 << 10 // 10KiB
+ buf := new(bytes.Buffer)
+ tw := NewWriter(iotest.TruncateWriter(buf, maxSize))
- buf := new(bytes.Buffer)
- tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
- big := false
- for j, entry := range v.entries {
- big = big || entry.header.Size > 1<<10
- if err := tw.WriteHeader(entry.header); err != nil {
- t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
- continue testLoop
+ for i, tf := range v.tests {
+ switch tf := tf.(type) {
+ case testHeader:
+ err := tw.WriteHeader(&tf.hdr)
+ if !equalError(err, tf.wantErr) {
+ t.Fatalf("test %d, WriteHeader() = %v, want %v", i, err, tf.wantErr)
+ }
+ case testWrite:
+ got, err := tw.Write([]byte(tf.str))
+ if got != tf.wantCnt || !equalError(err, tf.wantErr) {
+ t.Fatalf("test %d, Write() = (%d, %v), want (%d, %v)", i, got, err, tf.wantCnt, tf.wantErr)
+ }
+ case testReadFrom:
+ f := &testFile{ops: tf.ops}
+ got, err := tw.readFrom(f)
+ if _, ok := err.(testError); ok {
+ t.Errorf("test %d, ReadFrom(): %v", i, err)
+ } else if got != tf.wantCnt || !equalError(err, tf.wantErr) {
+ t.Errorf("test %d, ReadFrom() = (%d, %v), want (%d, %v)", i, got, err, tf.wantCnt, tf.wantErr)
+ }
+ if len(f.ops) > 0 {
+ t.Errorf("test %d, expected %d more operations", i, len(f.ops))
+ }
+ case testClose:
+ err := tw.Close()
+ if !equalError(err, tf.wantErr) {
+ t.Fatalf("test %d, Close() = %v, want %v", i, err, tf.wantErr)
+ }
+ default:
+ t.Fatalf("test %d, unknown test operation: %T", i, tf)
+ }
}
- if _, err := io.WriteString(tw, entry.contents); err != nil {
- t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
- continue testLoop
- }
- }
- // Only interested in Close failures for the small tests.
- if err := tw.Close(); err != nil && !big {
- t.Errorf("test %d: Failed closing archive: %v", i, err)
- continue testLoop
- }
- actual := buf.Bytes()
- if !bytes.Equal(expected, actual) {
- t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
- i, bytediff(expected, actual))
- }
- if testing.Short() { // The second test is expensive.
- break
- }
+ if v.file != "" {
+ want, err := ioutil.ReadFile(v.file)
+ if err != nil {
+ t.Fatalf("ReadFile() = %v, want nil", err)
+ }
+ got := buf.Bytes()
+ if !bytes.Equal(want, got) {
+ t.Fatalf("incorrect result: (-got +want)\n%v", bytediff(got, want))
+ }
+ }
+ })
}
}
@@ -546,21 +815,104 @@ func TestValidTypeflagWithPAXHeader(t *testing.T) {
}
}
-func TestWriteAfterClose(t *testing.T) {
- var buffer bytes.Buffer
- tw := NewWriter(&buffer)
+// failOnceWriter fails exactly once and then always reports success.
+type failOnceWriter bool
- hdr := &Header{
- Name: "small.txt",
- Size: 5,
- }
- if err := tw.WriteHeader(hdr); err != nil {
- t.Fatalf("Failed to write header: %s", err)
- }
- tw.Close()
- if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
- t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
+func (w *failOnceWriter) Write(b []byte) (int, error) {
+ if !*w {
+ return 0, io.ErrShortWrite
}
+ *w = true
+ return len(b), nil
+}
+
+func TestWriterErrors(t *testing.T) {
+ t.Run("HeaderOnly", func(t *testing.T) {
+ tw := NewWriter(new(bytes.Buffer))
+ hdr := &Header{Name: "dir/", Typeflag: TypeDir}
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("WriteHeader() = %v, want nil", err)
+ }
+ if _, err := tw.Write([]byte{0x00}); err != ErrWriteTooLong {
+ t.Fatalf("Write() = %v, want %v", err, ErrWriteTooLong)
+ }
+ })
+
+ t.Run("NegativeSize", func(t *testing.T) {
+ tw := NewWriter(new(bytes.Buffer))
+ hdr := &Header{Name: "small.txt", Size: -1}
+ if err := tw.WriteHeader(hdr); err == nil {
+ t.Fatalf("WriteHeader() = nil, want non-nil error")
+ }
+ })
+
+ t.Run("BeforeHeader", func(t *testing.T) {
+ tw := NewWriter(new(bytes.Buffer))
+ if _, err := tw.Write([]byte("Kilts")); err != ErrWriteTooLong {
+ t.Fatalf("Write() = %v, want %v", err, ErrWriteTooLong)
+ }
+ })
+
+ t.Run("AfterClose", func(t *testing.T) {
+ tw := NewWriter(new(bytes.Buffer))
+ hdr := &Header{Name: "small.txt"}
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("WriteHeader() = %v, want nil", err)
+ }
+ if err := tw.Close(); err != nil {
+ t.Fatalf("Close() = %v, want nil", err)
+ }
+ if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
+ t.Fatalf("Write() = %v, want %v", err, ErrWriteAfterClose)
+ }
+ if err := tw.Flush(); err != ErrWriteAfterClose {
+ t.Fatalf("Flush() = %v, want %v", err, ErrWriteAfterClose)
+ }
+ if err := tw.Close(); err != nil {
+ t.Fatalf("Close() = %v, want nil", err)
+ }
+ })
+
+ t.Run("PrematureFlush", func(t *testing.T) {
+ tw := NewWriter(new(bytes.Buffer))
+ hdr := &Header{Name: "small.txt", Size: 5}
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("WriteHeader() = %v, want nil", err)
+ }
+ if err := tw.Flush(); err == nil {
+ t.Fatalf("Flush() = %v, want non-nil error", err)
+ }
+ })
+
+ t.Run("PrematureClose", func(t *testing.T) {
+ tw := NewWriter(new(bytes.Buffer))
+ hdr := &Header{Name: "small.txt", Size: 5}
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("WriteHeader() = %v, want nil", err)
+ }
+ if err := tw.Close(); err == nil {
+ t.Fatalf("Close() = %v, want non-nil error", err)
+ }
+ })
+
+ t.Run("Persistence", func(t *testing.T) {
+ tw := NewWriter(new(failOnceWriter))
+ if err := tw.WriteHeader(&Header{}); err != io.ErrShortWrite {
+ t.Fatalf("WriteHeader() = %v, want %v", err, io.ErrShortWrite)
+ }
+ if err := tw.WriteHeader(&Header{Name: "small.txt"}); err == nil {
+ t.Errorf("WriteHeader() = got %v, want non-nil error", err)
+ }
+ if _, err := tw.Write(nil); err == nil {
+ t.Errorf("Write() = %v, want non-nil error", err)
+ }
+ if err := tw.Flush(); err == nil {
+ t.Errorf("Flush() = %v, want non-nil error", err)
+ }
+ if err := tw.Close(); err == nil {
+ t.Errorf("Close() = %v, want non-nil error", err)
+ }
+ })
}
func TestSplitUSTARPath(t *testing.T) {
@@ -631,7 +983,7 @@ func TestIssue12594(t *testing.T) {
if i := strings.IndexByte(prefix, 0); i >= 0 {
prefix = prefix[:i] // Truncate at the NUL terminator
}
- if blk.GetFormat() == formatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
+ if blk.GetFormat() == FormatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
t.Errorf("test %d, found prefix in GNU format: %s", i, prefix)
}
@@ -645,3 +997,306 @@ func TestIssue12594(t *testing.T) {
}
}
}
+
+// testNonEmptyWriter wraps an io.Writer and ensures that
+// Write is never called with an empty buffer.
+type testNonEmptyWriter struct{ io.Writer }
+
+func (w testNonEmptyWriter) Write(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, errors.New("unexpected empty Write call")
+ }
+ return w.Writer.Write(b)
+}
+
+func TestFileWriter(t *testing.T) {
+ type (
+ testWrite struct { // Write(str) == (wantCnt, wantErr)
+ str string
+ wantCnt int
+ wantErr error
+ }
+ testReadFrom struct { // ReadFrom(testFile{ops}) == (wantCnt, wantErr)
+ ops fileOps
+ wantCnt int64
+ wantErr error
+ }
+ testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt
+ wantLCnt int64
+ wantPCnt int64
+ }
+ testFnc interface{} // testWrite | testReadFrom | testRemaining
+ )
+
+ type (
+ makeReg struct {
+ size int64
+ wantStr string
+ }
+ makeSparse struct {
+ makeReg makeReg
+ sph sparseHoles
+ size int64
+ }
+ fileMaker interface{} // makeReg | makeSparse
+ )
+
+ vectors := []struct {
+ maker fileMaker
+ tests []testFnc
+ }{{
+ maker: makeReg{0, ""},
+ tests: []testFnc{
+ testRemaining{0, 0},
+ testWrite{"", 0, nil},
+ testWrite{"a", 0, ErrWriteTooLong},
+ testReadFrom{fileOps{""}, 0, nil},
+ testReadFrom{fileOps{"a"}, 0, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeReg{1, "a"},
+ tests: []testFnc{
+ testRemaining{1, 1},
+ testWrite{"", 0, nil},
+ testWrite{"a", 1, nil},
+ testWrite{"bcde", 0, ErrWriteTooLong},
+ testWrite{"", 0, nil},
+ testReadFrom{fileOps{""}, 0, nil},
+ testReadFrom{fileOps{"a"}, 0, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeReg{5, "hello"},
+ tests: []testFnc{
+ testRemaining{5, 5},
+ testWrite{"hello", 5, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeReg{5, "\x00\x00\x00\x00\x00"},
+ tests: []testFnc{
+ testRemaining{5, 5},
+ testReadFrom{fileOps{"\x00\x00\x00\x00\x00"}, 5, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeReg{5, "\x00\x00\x00\x00\x00"},
+ tests: []testFnc{
+ testRemaining{5, 5},
+ testReadFrom{fileOps{"\x00\x00\x00\x00\x00extra"}, 5, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeReg{5, "abc\x00\x00"},
+ tests: []testFnc{
+ testRemaining{5, 5},
+ testWrite{"abc", 3, nil},
+ testRemaining{2, 2},
+ testReadFrom{fileOps{"\x00\x00"}, 2, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeReg{5, "\x00\x00abc"},
+ tests: []testFnc{
+ testRemaining{5, 5},
+ testWrite{"\x00\x00", 2, nil},
+ testRemaining{3, 3},
+ testWrite{"abc", 3, nil},
+ testReadFrom{fileOps{"z"}, 0, ErrWriteTooLong},
+ testWrite{"z", 0, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testRemaining{8, 5},
+ testWrite{"ab\x00\x00\x00cde", 8, nil},
+ testWrite{"a", 0, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testWrite{"ab\x00\x00\x00cdez", 8, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testWrite{"ab\x00", 3, nil},
+ testRemaining{5, 3},
+ testWrite{"\x00\x00cde", 5, nil},
+ testWrite{"a", 0, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testWrite{"ab", 2, nil},
+ testRemaining{6, 3},
+ testReadFrom{fileOps{int64(3), "cde"}, 6, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testReadFrom{fileOps{"ab", int64(3), "cde"}, 8, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testReadFrom{fileOps{"ab", int64(3), "cdeX"}, 8, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testReadFrom{fileOps{"ab", int64(3), "cd"}, 7, io.ErrUnexpectedEOF},
+ testRemaining{1, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testReadFrom{fileOps{"ab", int64(3), "cde"}, 7, errMissData},
+ testRemaining{1, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{6, "abcde"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testReadFrom{fileOps{"ab", int64(3), "cde"}, 8, errUnrefData},
+ testRemaining{0, 1},
+ },
+ }, {
+ maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testWrite{"ab", 2, nil},
+ testRemaining{6, 2},
+ testWrite{"\x00\x00\x00", 3, nil},
+ testRemaining{3, 2},
+ testWrite{"cde", 2, errMissData},
+ testRemaining{1, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{6, "abcde"}, sparseHoles{{2, 3}}, 8},
+ tests: []testFnc{
+ testWrite{"ab", 2, nil},
+ testRemaining{6, 4},
+ testWrite{"\x00\x00\x00", 3, nil},
+ testRemaining{3, 4},
+ testWrite{"cde", 3, errUnrefData},
+ testRemaining{0, 1},
+ },
+ }, {
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
+ tests: []testFnc{
+ testRemaining{7, 3},
+ testWrite{"\x00\x00abc\x00\x00", 7, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
+ tests: []testFnc{
+ testRemaining{7, 3},
+ testReadFrom{fileOps{int64(2), "abc", int64(1), "\x00"}, 7, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{3, ""}, sparseHoles{{0, 2}, {5, 2}}, 7},
+ tests: []testFnc{
+ testWrite{"abcdefg", 0, errWriteHole},
+ },
+ }, {
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
+ tests: []testFnc{
+ testWrite{"\x00\x00abcde", 5, errWriteHole},
+ },
+ }, {
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
+ tests: []testFnc{
+ testWrite{"\x00\x00abc\x00\x00z", 7, ErrWriteTooLong},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
+ tests: []testFnc{
+ testWrite{"\x00\x00", 2, nil},
+ testRemaining{5, 3},
+ testWrite{"abc", 3, nil},
+ testRemaining{2, 0},
+ testWrite{"\x00\x00", 2, nil},
+ testRemaining{0, 0},
+ },
+ }, {
+ maker: makeSparse{makeReg{2, "ab"}, sparseHoles{{0, 2}, {5, 2}}, 7},
+ tests: []testFnc{
+ testWrite{"\x00\x00", 2, nil},
+ testWrite{"abc", 2, errMissData},
+ testWrite{"\x00\x00", 0, errMissData},
+ },
+ }, {
+ maker: makeSparse{makeReg{4, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
+ tests: []testFnc{
+ testWrite{"\x00\x00", 2, nil},
+ testWrite{"abc", 3, nil},
+ testWrite{"\x00\x00", 2, errUnrefData},
+ },
+ }}
+
+ for i, v := range vectors {
+ var wantStr string
+ bb := new(bytes.Buffer)
+ w := testNonEmptyWriter{bb}
+ var fw fileWriter
+ switch maker := v.maker.(type) {
+ case makeReg:
+ fw = ®FileWriter{w, maker.size}
+ wantStr = maker.wantStr
+ case makeSparse:
+ if !validateSparseEntries(maker.sph, maker.size) {
+ t.Fatalf("invalid sparse map: %v", maker.sph)
+ }
+ spd := invertSparseEntries(maker.sph, maker.size)
+ fw = ®FileWriter{w, maker.makeReg.size}
+ fw = &sparseFileWriter{fw, spd, 0}
+ wantStr = maker.makeReg.wantStr
+ default:
+ t.Fatalf("test %d, unknown make operation: %T", i, maker)
+ }
+
+ for j, tf := range v.tests {
+ switch tf := tf.(type) {
+ case testWrite:
+ got, err := fw.Write([]byte(tf.str))
+ if got != tf.wantCnt || err != tf.wantErr {
+ t.Errorf("test %d.%d, Write(%s):\ngot (%d, %v)\nwant (%d, %v)", i, j, tf.str, got, err, tf.wantCnt, tf.wantErr)
+ }
+ case testReadFrom:
+ f := &testFile{ops: tf.ops}
+ got, err := fw.ReadFrom(f)
+ if _, ok := err.(testError); ok {
+ t.Errorf("test %d.%d, ReadFrom(): %v", i, j, err)
+ } else if got != tf.wantCnt || err != tf.wantErr {
+ t.Errorf("test %d.%d, ReadFrom() = (%d, %v), want (%d, %v)", i, j, got, err, tf.wantCnt, tf.wantErr)
+ }
+ if len(f.ops) > 0 {
+ t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
+ }
+ case testRemaining:
+ if got := fw.LogicalRemaining(); got != tf.wantLCnt {
+ t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
+ }
+ if got := fw.PhysicalRemaining(); got != tf.wantPCnt {
+ t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
+ }
+ default:
+ t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
+ }
+ }
+
+ if got := bb.String(); got != wantStr {
+ t.Fatalf("test %d, String() = %q, want %q", i, got, wantStr)
+ }
+ }
+}
diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go
index f6c3ead3bea..1563e74dfce 100644
--- a/src/archive/zip/reader.go
+++ b/src/archive/zip/reader.go
@@ -13,6 +13,7 @@ import (
"hash/crc32"
"io"
"os"
+ "time"
)
var (
@@ -94,7 +95,7 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
// The count of files inside a zip is truncated to fit in a uint16.
// Gloss over this by reading headers until we encounter
- // a bad one, and then only report a ErrFormat or UnexpectedEOF if
+ // a bad one, and then only report an ErrFormat or UnexpectedEOF if
// the file count modulo 65536 is incorrect.
for {
f := &File{zip: z, zipr: r, zipsize: size}
@@ -280,52 +281,128 @@ func readDirectoryHeader(f *File, r io.Reader) error {
f.Extra = d[filenameLen : filenameLen+extraLen]
f.Comment = string(d[filenameLen+extraLen:])
+ // Determine the character encoding.
+ utf8Valid1, utf8Require1 := detectUTF8(f.Name)
+ utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
+ switch {
+ case !utf8Valid1 || !utf8Valid2:
+ // Name and Comment definitely not UTF-8.
+ f.NonUTF8 = true
+ case !utf8Require1 && !utf8Require2:
+ // Name and Comment use only single-byte runes that overlap with UTF-8.
+ f.NonUTF8 = false
+ default:
+ // Might be UTF-8, might be some other encoding; preserve existing flag.
+ // Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
+ // Since it is impossible to always distinguish valid UTF-8 from some
+ // other encoding (e.g., GBK or Shift-JIS), we trust the flag.
+ f.NonUTF8 = f.Flags&0x800 == 0
+ }
+
needUSize := f.UncompressedSize == ^uint32(0)
needCSize := f.CompressedSize == ^uint32(0)
needHeaderOffset := f.headerOffset == int64(^uint32(0))
- if len(f.Extra) > 0 {
- // Best effort to find what we need.
- // Other zip authors might not even follow the basic format,
- // and we'll just ignore the Extra content in that case.
- b := readBuf(f.Extra)
- for len(b) >= 4 { // need at least tag and size
- tag := b.uint16()
- size := b.uint16()
- if int(size) > len(b) {
- break
- }
- if tag == zip64ExtraId {
- // update directory values from the zip64 extra block.
- // They should only be consulted if the sizes read earlier
- // are maxed out.
- // See golang.org/issue/13367.
- eb := readBuf(b[:size])
+ // Best effort to find what we need.
+ // Other zip authors might not even follow the basic format,
+ // and we'll just ignore the Extra content in that case.
+ var modified time.Time
+parseExtras:
+ for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
+ fieldTag := extra.uint16()
+ fieldSize := int(extra.uint16())
+ if len(extra) < fieldSize {
+ break
+ }
+ fieldBuf := extra.sub(fieldSize)
- if needUSize {
- needUSize = false
- if len(eb) < 8 {
- return ErrFormat
- }
- f.UncompressedSize64 = eb.uint64()
+ switch fieldTag {
+ case zip64ExtraID:
+ // update directory values from the zip64 extra block.
+ // They should only be consulted if the sizes read earlier
+ // are maxed out.
+ // See golang.org/issue/13367.
+ if needUSize {
+ needUSize = false
+ if len(fieldBuf) < 8 {
+ return ErrFormat
}
- if needCSize {
- needCSize = false
- if len(eb) < 8 {
- return ErrFormat
- }
- f.CompressedSize64 = eb.uint64()
- }
- if needHeaderOffset {
- needHeaderOffset = false
- if len(eb) < 8 {
- return ErrFormat
- }
- f.headerOffset = int64(eb.uint64())
- }
- break
+ f.UncompressedSize64 = fieldBuf.uint64()
}
- b = b[size:]
+ if needCSize {
+ needCSize = false
+ if len(fieldBuf) < 8 {
+ return ErrFormat
+ }
+ f.CompressedSize64 = fieldBuf.uint64()
+ }
+ if needHeaderOffset {
+ needHeaderOffset = false
+ if len(fieldBuf) < 8 {
+ return ErrFormat
+ }
+ f.headerOffset = int64(fieldBuf.uint64())
+ }
+ case ntfsExtraID:
+ if len(fieldBuf) < 4 {
+ continue parseExtras
+ }
+ fieldBuf.uint32() // reserved (ignored)
+ for len(fieldBuf) >= 4 { // need at least tag and size
+ attrTag := fieldBuf.uint16()
+ attrSize := int(fieldBuf.uint16())
+ if len(fieldBuf) < attrSize {
+ continue parseExtras
+ }
+ attrBuf := fieldBuf.sub(attrSize)
+ if attrTag != 1 || attrSize != 24 {
+ continue // Ignore irrelevant attributes
+ }
+
+ const ticksPerSecond = 1e7 // Windows timestamp resolution
+ ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
+ secs := int64(ts / ticksPerSecond)
+ nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
+ epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
+ modified = time.Unix(epoch.Unix()+secs, nsecs)
+ }
+ case unixExtraID:
+ if len(fieldBuf) < 8 {
+ continue parseExtras
+ }
+ fieldBuf.uint32() // AcTime (ignored)
+ ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+ modified = time.Unix(ts, 0)
+ case extTimeExtraID:
+ if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
+ continue parseExtras
+ }
+ ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+ modified = time.Unix(ts, 0)
+ case infoZipUnixExtraID:
+ if len(fieldBuf) < 4 {
+ continue parseExtras
+ }
+ ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
+ modified = time.Unix(ts, 0)
+ }
+ }
+
+ msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
+ f.Modified = msdosModified
+ if !modified.IsZero() {
+ f.Modified = modified.UTC()
+
+ // If legacy MS-DOS timestamps are set, we can use the delta between
+ // the legacy and extended versions to estimate timezone offset.
+ //
+ // A non-UTC timezone is always used (even if offset is zero).
+ // Thus, FileHeader.Modified.Location() == time.UTC is useful for
+ // determining whether extended timestamps are present.
+ // This is necessary for users that need to do additional time
+ // calculations when dealing with legacy ZIP formats.
+ if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
+ f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
}
}
@@ -508,6 +585,12 @@ func findSignatureInBlock(b []byte) int {
type readBuf []byte
+func (b *readBuf) uint8() uint8 {
+ v := (*b)[0]
+ *b = (*b)[1:]
+ return v
+}
+
func (b *readBuf) uint16() uint16 {
v := binary.LittleEndian.Uint16(*b)
*b = (*b)[2:]
@@ -525,3 +608,9 @@ func (b *readBuf) uint64() uint64 {
*b = (*b)[8:]
return v
}
+
+func (b *readBuf) sub(n int) readBuf {
+ b2 := (*b)[:n]
+ *b = (*b)[n:]
+ return b2
+}
diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
index dfaae784361..0d9040f7674 100644
--- a/src/archive/zip/reader_test.go
+++ b/src/archive/zip/reader_test.go
@@ -27,9 +27,11 @@ type ZipTest struct {
}
type ZipTestFile struct {
- Name string
- Mode os.FileMode
- Mtime string // optional, modified time in format "mm-dd-yy hh:mm:ss"
+ Name string
+ Mode os.FileMode
+ NonUTF8 bool
+ ModTime time.Time
+ Modified time.Time
// Information describing expected zip file content.
// First, reading the entire content should produce the error ContentErr.
@@ -47,32 +49,22 @@ type ZipTestFile struct {
Size uint64
}
-// Caution: The Mtime values found for the test files should correspond to
-// the values listed with unzip -l . However, the values
-// listed by unzip appear to be off by some hours. When creating
-// fresh test files and testing them, this issue is not present.
-// The test files were created in Sydney, so there might be a time
-// zone issue. The time zone information does have to be encoded
-// somewhere, because otherwise unzip -l could not provide a different
-// time from what the archive/zip package provides, but there appears
-// to be no documentation about this.
-
var tests = []ZipTest{
{
Name: "test.zip",
Comment: "This is a zipfile comment.",
File: []ZipTestFile{
{
- Name: "test.txt",
- Content: []byte("This is a test text file.\n"),
- Mtime: "09-05-10 12:12:02",
- Mode: 0644,
+ Name: "test.txt",
+ Content: []byte("This is a test text file.\n"),
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
},
{
- Name: "gophercolor16x16.png",
- File: "gophercolor16x16.png",
- Mtime: "09-05-10 15:52:58",
- Mode: 0644,
+ Name: "gophercolor16x16.png",
+ File: "gophercolor16x16.png",
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
},
},
},
@@ -81,16 +73,16 @@ var tests = []ZipTest{
Comment: "This is a zipfile comment.",
File: []ZipTestFile{
{
- Name: "test.txt",
- Content: []byte("This is a test text file.\n"),
- Mtime: "09-05-10 12:12:02",
- Mode: 0644,
+ Name: "test.txt",
+ Content: []byte("This is a test text file.\n"),
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
},
{
- Name: "gophercolor16x16.png",
- File: "gophercolor16x16.png",
- Mtime: "09-05-10 15:52:58",
- Mode: 0644,
+ Name: "gophercolor16x16.png",
+ File: "gophercolor16x16.png",
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
+ Mode: 0644,
},
},
},
@@ -99,10 +91,10 @@ var tests = []ZipTest{
Source: returnRecursiveZip,
File: []ZipTestFile{
{
- Name: "r/r.zip",
- Content: rZipBytes(),
- Mtime: "03-04-10 00:24:16",
- Mode: 0666,
+ Name: "r/r.zip",
+ Content: rZipBytes(),
+ Modified: time.Date(2010, 3, 4, 0, 24, 16, 0, time.UTC),
+ Mode: 0666,
},
},
},
@@ -110,9 +102,10 @@ var tests = []ZipTest{
Name: "symlink.zip",
File: []ZipTestFile{
{
- Name: "symlink",
- Content: []byte("../target"),
- Mode: 0777 | os.ModeSymlink,
+ Name: "symlink",
+ Content: []byte("../target"),
+ Modified: time.Date(2012, 2, 3, 19, 56, 48, 0, timeZone(-2*time.Hour)),
+ Mode: 0777 | os.ModeSymlink,
},
},
},
@@ -127,22 +120,72 @@ var tests = []ZipTest{
Name: "dd.zip",
File: []ZipTestFile{
{
- Name: "filename",
- Content: []byte("This is a test textfile.\n"),
- Mtime: "02-02-11 13:06:20",
- Mode: 0666,
+ Name: "filename",
+ Content: []byte("This is a test textfile.\n"),
+ Modified: time.Date(2011, 2, 2, 13, 6, 20, 0, time.UTC),
+ Mode: 0666,
},
},
},
{
// created in windows XP file manager.
Name: "winxp.zip",
- File: crossPlatform,
+ File: []ZipTestFile{
+ {
+ Name: "hello",
+ Content: []byte("world \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, time.UTC),
+ Mode: 0666,
+ },
+ {
+ Name: "dir/bar",
+ Content: []byte("foo \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, time.UTC),
+ Mode: 0666,
+ },
+ {
+ Name: "dir/empty/",
+ Content: []byte{},
+ Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, time.UTC),
+ Mode: os.ModeDir | 0777,
+ },
+ {
+ Name: "readonly",
+ Content: []byte("important \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, time.UTC),
+ Mode: 0444,
+ },
+ },
},
{
// created by Zip 3.0 under Linux
Name: "unix.zip",
- File: crossPlatform,
+ File: []ZipTestFile{
+ {
+ Name: "hello",
+ Content: []byte("world \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, timeZone(0)),
+ Mode: 0666,
+ },
+ {
+ Name: "dir/bar",
+ Content: []byte("foo \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, timeZone(0)),
+ Mode: 0666,
+ },
+ {
+ Name: "dir/empty/",
+ Content: []byte{},
+ Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, timeZone(0)),
+ Mode: os.ModeDir | 0777,
+ },
+ {
+ Name: "readonly",
+ Content: []byte("important \r\n"),
+ Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, timeZone(0)),
+ Mode: 0444,
+ },
+ },
},
{
// created by Go, before we wrote the "optional" data
@@ -150,16 +193,16 @@ var tests = []ZipTest{
Name: "go-no-datadesc-sig.zip",
File: []ZipTestFile{
{
- Name: "foo.txt",
- Content: []byte("foo\n"),
- Mtime: "03-08-12 16:59:10",
- Mode: 0644,
+ Name: "foo.txt",
+ Content: []byte("foo\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
},
{
- Name: "bar.txt",
- Content: []byte("bar\n"),
- Mtime: "03-08-12 16:59:12",
- Mode: 0644,
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
},
},
},
@@ -169,14 +212,16 @@ var tests = []ZipTest{
Name: "go-with-datadesc-sig.zip",
File: []ZipTestFile{
{
- Name: "foo.txt",
- Content: []byte("foo\n"),
- Mode: 0666,
+ Name: "foo.txt",
+ Content: []byte("foo\n"),
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
},
{
- Name: "bar.txt",
- Content: []byte("bar\n"),
- Mode: 0666,
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
},
},
},
@@ -187,13 +232,15 @@ var tests = []ZipTest{
{
Name: "foo.txt",
Content: []byte("foo\n"),
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
Mode: 0666,
ContentErr: ErrChecksum,
},
{
- Name: "bar.txt",
- Content: []byte("bar\n"),
- Mode: 0666,
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
},
},
},
@@ -203,16 +250,16 @@ var tests = []ZipTest{
Name: "crc32-not-streamed.zip",
File: []ZipTestFile{
{
- Name: "foo.txt",
- Content: []byte("foo\n"),
- Mtime: "03-08-12 16:59:10",
- Mode: 0644,
+ Name: "foo.txt",
+ Content: []byte("foo\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
},
{
- Name: "bar.txt",
- Content: []byte("bar\n"),
- Mtime: "03-08-12 16:59:12",
- Mode: 0644,
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
},
},
},
@@ -225,15 +272,15 @@ var tests = []ZipTest{
{
Name: "foo.txt",
Content: []byte("foo\n"),
- Mtime: "03-08-12 16:59:10",
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
Mode: 0644,
ContentErr: ErrChecksum,
},
{
- Name: "bar.txt",
- Content: []byte("bar\n"),
- Mtime: "03-08-12 16:59:12",
- Mode: 0644,
+ Name: "bar.txt",
+ Content: []byte("bar\n"),
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
+ Mode: 0644,
},
},
},
@@ -241,10 +288,10 @@ var tests = []ZipTest{
Name: "zip64.zip",
File: []ZipTestFile{
{
- Name: "README",
- Content: []byte("This small file is in ZIP64 format.\n"),
- Mtime: "08-10-12 14:33:32",
- Mode: 0644,
+ Name: "README",
+ Content: []byte("This small file is in ZIP64 format.\n"),
+ Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, time.UTC),
+ Mode: 0644,
},
},
},
@@ -253,10 +300,10 @@ var tests = []ZipTest{
Name: "zip64-2.zip",
File: []ZipTestFile{
{
- Name: "README",
- Content: []byte("This small file is in ZIP64 format.\n"),
- Mtime: "08-10-12 14:33:32",
- Mode: 0644,
+ Name: "README",
+ Content: []byte("This small file is in ZIP64 format.\n"),
+ Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, timeZone(-4*time.Hour)),
+ Mode: 0644,
},
},
},
@@ -266,41 +313,179 @@ var tests = []ZipTest{
Source: returnBigZipBytes,
File: []ZipTestFile{
{
- Name: "big.file",
- Content: nil,
- Size: 1<<32 - 1,
- Mode: 0666,
+ Name: "big.file",
+ Content: nil,
+ Size: 1<<32 - 1,
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "utf8-7zip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "utf8-infozip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0644,
+ // Name is valid UTF-8, but format does not have UTF-8 flag set.
+ // We don't do UTF-8 detection for multi-byte runes due to
+ // false-positives with other encodings (e.g., Shift-JIS).
+ // Format says encoding is not UTF-8, so we trust it.
+ NonUTF8: true,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "utf8-osx.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0644,
+ // Name is valid UTF-8, but format does not have UTF-8 set.
+ NonUTF8: true,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "utf8-winrar.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "utf8-winzip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "世界",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867000000, timeZone(-8*time.Hour)),
+ },
+ },
+ },
+ {
+ Name: "time-7zip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-infozip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "time-osx.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 17, 27, 0, timeZone(-7*time.Hour)),
+ Mode: 0644,
+ },
+ },
+ },
+ {
+ Name: "time-win7.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 58, 0, time.UTC),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-winrar.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-winzip.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244000000, timeZone(-7*time.Hour)),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-go.zip",
+ File: []ZipTestFile{
+ {
+ Name: "test.txt",
+ Content: []byte{},
+ Size: 1<<32 - 1,
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+ Mode: 0666,
+ },
+ },
+ },
+ {
+ Name: "time-22738.zip",
+ File: []ZipTestFile{
+ {
+ Name: "file",
+ Content: []byte{},
+ Mode: 0666,
+ Modified: time.Date(1999, 12, 31, 19, 0, 0, 0, timeZone(-5*time.Hour)),
+ ModTime: time.Date(1999, 12, 31, 19, 0, 0, 0, time.UTC),
},
},
},
}
-var crossPlatform = []ZipTestFile{
- {
- Name: "hello",
- Content: []byte("world \r\n"),
- Mode: 0666,
- },
- {
- Name: "dir/bar",
- Content: []byte("foo \r\n"),
- Mode: 0666,
- },
- {
- Name: "dir/empty/",
- Content: []byte{},
- Mode: os.ModeDir | 0777,
- },
- {
- Name: "readonly",
- Content: []byte("important \r\n"),
- Mode: 0444,
- },
-}
-
func TestReader(t *testing.T) {
for _, zt := range tests {
- readTestZip(t, zt)
+ t.Run(zt.Name, func(t *testing.T) {
+ readTestZip(t, zt)
+ })
}
}
@@ -319,7 +504,7 @@ func readTestZip(t *testing.T, zt ZipTest) {
}
}
if err != zt.Error {
- t.Errorf("%s: error=%v, want %v", zt.Name, err, zt.Error)
+ t.Errorf("error=%v, want %v", err, zt.Error)
return
}
@@ -335,16 +520,19 @@ func readTestZip(t *testing.T, zt ZipTest) {
}
if z.Comment != zt.Comment {
- t.Errorf("%s: comment=%q, want %q", zt.Name, z.Comment, zt.Comment)
+ t.Errorf("comment=%q, want %q", z.Comment, zt.Comment)
}
if len(z.File) != len(zt.File) {
- t.Fatalf("%s: file count=%d, want %d", zt.Name, len(z.File), len(zt.File))
+ t.Fatalf("file count=%d, want %d", len(z.File), len(zt.File))
}
// test read of each file
for i, ft := range zt.File {
readTestFile(t, zt, ft, z.File[i])
}
+ if t.Failed() {
+ return
+ }
// test simultaneous reads
n := 0
@@ -363,23 +551,24 @@ func readTestZip(t *testing.T, zt ZipTest) {
}
}
+func equalTimeAndZone(t1, t2 time.Time) bool {
+ name1, offset1 := t1.Zone()
+ name2, offset2 := t2.Zone()
+ return t1.Equal(t2) && name1 == name2 && offset1 == offset2
+}
+
func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) {
if f.Name != ft.Name {
- t.Errorf("%s: name=%q, want %q", zt.Name, f.Name, ft.Name)
+ t.Errorf("name=%q, want %q", f.Name, ft.Name)
+ }
+ if !ft.Modified.IsZero() && !equalTimeAndZone(f.Modified, ft.Modified) {
+ t.Errorf("%s: Modified=%s, want %s", f.Name, f.Modified, ft.Modified)
+ }
+ if !ft.ModTime.IsZero() && !equalTimeAndZone(f.ModTime(), ft.ModTime) {
+ t.Errorf("%s: ModTime=%s, want %s", f.Name, f.ModTime(), ft.ModTime)
}
- if ft.Mtime != "" {
- mtime, err := time.Parse("01-02-06 15:04:05", ft.Mtime)
- if err != nil {
- t.Error(err)
- return
- }
- if ft := f.ModTime(); !ft.Equal(mtime) {
- t.Errorf("%s: %s: mtime=%s, want %s", zt.Name, f.Name, ft, mtime)
- }
- }
-
- testFileMode(t, zt.Name, f, ft.Mode)
+ testFileMode(t, f, ft.Mode)
size := uint64(f.UncompressedSize)
if size == uint32max {
@@ -390,7 +579,7 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) {
r, err := f.Open()
if err != nil {
- t.Errorf("%s: %v", zt.Name, err)
+ t.Errorf("%v", err)
return
}
@@ -408,7 +597,7 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) {
var b bytes.Buffer
_, err = io.Copy(&b, r)
if err != ft.ContentErr {
- t.Errorf("%s: copying contents: %v (want %v)", zt.Name, err, ft.ContentErr)
+ t.Errorf("copying contents: %v (want %v)", err, ft.ContentErr)
}
if err != nil {
return
@@ -440,12 +629,12 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) {
}
}
-func testFileMode(t *testing.T, zipName string, f *File, want os.FileMode) {
+func testFileMode(t *testing.T, f *File, want os.FileMode) {
mode := f.Mode()
if want == 0 {
- t.Errorf("%s: %s mode: got %v, want none", zipName, f.Name, mode)
+ t.Errorf("%s mode: got %v, want none", f.Name, mode)
} else if mode != want {
- t.Errorf("%s: %s mode: want %v, got %v", zipName, f.Name, want, mode)
+ t.Errorf("%s mode: want %v, got %v", f.Name, want, mode)
}
}
diff --git a/src/archive/zip/struct.go b/src/archive/zip/struct.go
index 0be210e8e73..f613ebdc344 100644
--- a/src/archive/zip/struct.go
+++ b/src/archive/zip/struct.go
@@ -27,8 +27,8 @@ import (
// Compression methods.
const (
- Store uint16 = 0
- Deflate uint16 = 8
+ Store uint16 = 0 // no compression
+ Deflate uint16 = 8 // DEFLATE compressed
)
const (
@@ -46,40 +46,79 @@ const (
directory64LocLen = 20 //
directory64EndLen = 56 // + extra
- // Constants for the first byte in CreatorVersion
+ // Constants for the first byte in CreatorVersion.
creatorFAT = 0
creatorUnix = 3
creatorNTFS = 11
creatorVFAT = 14
creatorMacOSX = 19
- // version numbers
+ // Version numbers.
zipVersion20 = 20 // 2.0
zipVersion45 = 45 // 4.5 (reads and writes zip64 archives)
- // limits for non zip64 files
+ // Limits for non zip64 files.
uint16max = (1 << 16) - 1
uint32max = (1 << 32) - 1
- // extra header id's
- zip64ExtraId = 0x0001 // zip64 Extended Information Extra Field
+ // Extra header IDs.
+ //
+ // IDs 0..31 are reserved for official use by PKWARE.
+ // IDs above that range are defined by third-party vendors.
+ // Since ZIP lacked high precision timestamps (nor a official specification
+ // of the timezone used for the date fields), many competing extra fields
+ // have been invented. Pervasive use effectively makes them "official".
+ //
+ // See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
+ zip64ExtraID = 0x0001 // Zip64 extended information
+ ntfsExtraID = 0x000a // NTFS
+ unixExtraID = 0x000d // UNIX
+ extTimeExtraID = 0x5455 // Extended timestamp
+ infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
)
// FileHeader describes a file within a zip file.
// See the zip spec for details.
type FileHeader struct {
// Name is the name of the file.
- // It must be a relative path: it must not start with a drive
- // letter (e.g. C:) or leading slash, and only forward slashes
- // are allowed.
+ // It must be a relative path, not start with a drive letter (e.g. C:),
+ // and must use forward slashes instead of back slashes.
Name string
- CreatorVersion uint16
- ReaderVersion uint16
- Flags uint16
- Method uint16
- ModifiedTime uint16 // MS-DOS time
- ModifiedDate uint16 // MS-DOS date
+ // Comment is any arbitrary user-defined string shorter than 64KiB.
+ Comment string
+
+ // NonUTF8 indicates that Name and Comment are not encoded in UTF-8.
+ //
+ // By specification, the only other encoding permitted should be CP-437,
+ // but historically many ZIP readers interpret Name and Comment as whatever
+ // the system's local character encoding happens to be.
+ //
+ // This flag should only be set if the user intends to encode a non-portable
+ // ZIP file for a specific localized region. Otherwise, the Writer
+ // automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.
+ NonUTF8 bool
+
+ CreatorVersion uint16
+ ReaderVersion uint16
+ Flags uint16
+
+ // Method is the compression method. If zero, Store is used.
+ Method uint16
+
+ // Modified is the modified time of the file.
+ //
+ // When reading, an extended timestamp is preferred over the legacy MS-DOS
+ // date field, and the offset between the times is used as the timezone.
+ // If only the MS-DOS date is present, the timezone is assumed to be UTC.
+ //
+ // When writing, an extended timestamp (which is timezone-agnostic) is
+ // always emitted. The legacy MS-DOS date field is encoded according to the
+ // location of the Modified time.
+ Modified time.Time
+ ModifiedTime uint16 // Deprecated: Legacy MS-DOS date; use Modified instead.
+ ModifiedDate uint16 // Deprecated: Legacy MS-DOS time; use Modified instead.
+
CRC32 uint32
CompressedSize uint32 // Deprecated: Use CompressedSize64 instead.
UncompressedSize uint32 // Deprecated: Use UncompressedSize64 instead.
@@ -87,7 +126,6 @@ type FileHeader struct {
UncompressedSize64 uint64
Extra []byte
ExternalAttrs uint32 // Meaning depends on CreatorVersion
- Comment string
}
// FileInfo returns an os.FileInfo for the FileHeader.
@@ -117,6 +155,8 @@ func (fi headerFileInfo) Sys() interface{} { return fi.fh }
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
+// If compression is desired, callers should set the FileHeader.Method
+// field; it is unset by default.
func FileInfoHeader(fi os.FileInfo) (*FileHeader, error) {
size := fi.Size()
fh := &FileHeader{
@@ -144,6 +184,21 @@ type directoryEnd struct {
comment string
}
+// timeZone returns a *time.Location based on the provided offset.
+// If the offset is non-sensible, then this uses an offset of zero.
+func timeZone(offset time.Duration) *time.Location {
+ const (
+ minOffset = -12 * time.Hour // E.g., Baker island at -12:00
+ maxOffset = +14 * time.Hour // E.g., Line island at +14:00
+ offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45
+ )
+ offset = offset.Round(offsetAlias)
+ if offset < minOffset || maxOffset < offset {
+ offset = 0
+ }
+ return time.FixedZone("", int(offset/time.Second))
+}
+
// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
// The resolution is 2s.
// See: http://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx
@@ -168,21 +223,26 @@ func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
// The resolution is 2s.
// See: http://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx
func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
- t = t.In(time.UTC)
fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
return
}
-// ModTime returns the modification time in UTC.
-// The resolution is 2s.
+// ModTime returns the modification time in UTC using the legacy
+// ModifiedDate and ModifiedTime fields.
+//
+// Deprecated: Use Modified instead.
func (h *FileHeader) ModTime() time.Time {
return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
}
-// SetModTime sets the ModifiedTime and ModifiedDate fields to the given time in UTC.
-// The resolution is 2s.
+// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields
+// to the given time in UTC.
+//
+// Deprecated: Use Modified instead.
func (h *FileHeader) SetModTime(t time.Time) {
+ t = t.UTC() // Convert to UTC for compatibility
+ h.Modified = t
h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
}
diff --git a/src/archive/zip/testdata/time-22738.zip b/src/archive/zip/testdata/time-22738.zip
new file mode 100644
index 00000000000..eb85b57103e
Binary files /dev/null and b/src/archive/zip/testdata/time-22738.zip differ
diff --git a/src/archive/zip/testdata/time-7zip.zip b/src/archive/zip/testdata/time-7zip.zip
new file mode 100644
index 00000000000..4f74819d11d
Binary files /dev/null and b/src/archive/zip/testdata/time-7zip.zip differ
diff --git a/src/archive/zip/testdata/time-go.zip b/src/archive/zip/testdata/time-go.zip
new file mode 100644
index 00000000000..f008805fa42
Binary files /dev/null and b/src/archive/zip/testdata/time-go.zip differ
diff --git a/src/archive/zip/testdata/time-infozip.zip b/src/archive/zip/testdata/time-infozip.zip
new file mode 100644
index 00000000000..8e6394891f0
Binary files /dev/null and b/src/archive/zip/testdata/time-infozip.zip differ
diff --git a/src/archive/zip/testdata/time-osx.zip b/src/archive/zip/testdata/time-osx.zip
new file mode 100644
index 00000000000..e82c5c229e0
Binary files /dev/null and b/src/archive/zip/testdata/time-osx.zip differ
diff --git a/src/archive/zip/testdata/time-win7.zip b/src/archive/zip/testdata/time-win7.zip
new file mode 100644
index 00000000000..8ba222b2246
Binary files /dev/null and b/src/archive/zip/testdata/time-win7.zip differ
diff --git a/src/archive/zip/testdata/time-winrar.zip b/src/archive/zip/testdata/time-winrar.zip
new file mode 100644
index 00000000000..a8a19b0f8e2
Binary files /dev/null and b/src/archive/zip/testdata/time-winrar.zip differ
diff --git a/src/archive/zip/testdata/time-winzip.zip b/src/archive/zip/testdata/time-winzip.zip
new file mode 100644
index 00000000000..f6e8f8ba067
Binary files /dev/null and b/src/archive/zip/testdata/time-winzip.zip differ
diff --git a/src/archive/zip/testdata/utf8-7zip.zip b/src/archive/zip/testdata/utf8-7zip.zip
new file mode 100644
index 00000000000..0e97884559f
Binary files /dev/null and b/src/archive/zip/testdata/utf8-7zip.zip differ
diff --git a/src/archive/zip/testdata/utf8-infozip.zip b/src/archive/zip/testdata/utf8-infozip.zip
new file mode 100644
index 00000000000..25a892646ce
Binary files /dev/null and b/src/archive/zip/testdata/utf8-infozip.zip differ
diff --git a/src/archive/zip/testdata/utf8-osx.zip b/src/archive/zip/testdata/utf8-osx.zip
new file mode 100644
index 00000000000..9b0c058b5b5
Binary files /dev/null and b/src/archive/zip/testdata/utf8-osx.zip differ
diff --git a/src/archive/zip/testdata/utf8-winrar.zip b/src/archive/zip/testdata/utf8-winrar.zip
new file mode 100644
index 00000000000..4bad6c3a5e0
Binary files /dev/null and b/src/archive/zip/testdata/utf8-winrar.zip differ
diff --git a/src/archive/zip/testdata/utf8-winzip.zip b/src/archive/zip/testdata/utf8-winzip.zip
new file mode 100644
index 00000000000..909d52ed2d9
Binary files /dev/null and b/src/archive/zip/testdata/utf8-winzip.zip differ
diff --git a/src/archive/zip/writer.go b/src/archive/zip/writer.go
index 9f4fceee844..14a5ee48c11 100644
--- a/src/archive/zip/writer.go
+++ b/src/archive/zip/writer.go
@@ -14,6 +14,11 @@ import (
"unicode/utf8"
)
+var (
+ errLongName = errors.New("zip: FileHeader.Name too long")
+ errLongExtra = errors.New("zip: FileHeader.Extra too long")
+)
+
// Writer implements a zip file writer.
type Writer struct {
cw *countWriter
@@ -21,6 +26,7 @@ type Writer struct {
last *fileWriter
closed bool
compressors map[uint16]Compressor
+ comment string
// testHookCloseSizeOffset if non-nil is called with the size
// of offset of the central directory at Close.
@@ -54,6 +60,16 @@ func (w *Writer) Flush() error {
return w.cw.w.(*bufio.Writer).Flush()
}
+// SetComment sets the end-of-central-directory comment field.
+// It can only be called before Close.
+func (w *Writer) SetComment(comment string) error {
+ if len(comment) > uint16max {
+ return errors.New("zip: Writer.Comment too long")
+ }
+ w.comment = comment
+ return nil
+}
+
// Close finishes writing the zip file by writing the central directory.
// It does not (and cannot) close the underlying writer.
func (w *Writer) Close() error {
@@ -91,7 +107,7 @@ func (w *Writer) Close() error {
// append a zip64 extra block to Extra
var buf [28]byte // 2x uint16 + 3x uint64
eb := writeBuf(buf[:])
- eb.uint16(zip64ExtraId)
+ eb.uint16(zip64ExtraID)
eb.uint16(24) // size = 3x uint64
eb.uint64(h.UncompressedSize64)
eb.uint64(h.CompressedSize64)
@@ -172,21 +188,25 @@ func (w *Writer) Close() error {
var buf [directoryEndLen]byte
b := writeBuf(buf[:])
b.uint32(uint32(directoryEndSignature))
- b = b[4:] // skip over disk number and first disk number (2x uint16)
- b.uint16(uint16(records)) // number of entries this disk
- b.uint16(uint16(records)) // number of entries total
- b.uint32(uint32(size)) // size of directory
- b.uint32(uint32(offset)) // start of directory
- // skipped size of comment (always zero)
+ b = b[4:] // skip over disk number and first disk number (2x uint16)
+ b.uint16(uint16(records)) // number of entries this disk
+ b.uint16(uint16(records)) // number of entries total
+ b.uint32(uint32(size)) // size of directory
+ b.uint32(uint32(offset)) // start of directory
+ b.uint16(uint16(len(w.comment))) // byte size of EOCD comment
if _, err := w.cw.Write(buf[:]); err != nil {
return err
}
+ if _, err := io.WriteString(w.cw, w.comment); err != nil {
+ return err
+ }
return w.cw.w.(*bufio.Writer).Flush()
}
// Create adds a file to the zip file using the provided name.
// It returns a Writer to which the file contents should be written.
+// The file contents will be compressed using the Deflate method.
// The name must be a relative path: it must not start with a drive
// letter (e.g. C:) or leading slash, and only forward slashes are
// allowed.
@@ -200,27 +220,36 @@ func (w *Writer) Create(name string) (io.Writer, error) {
return w.CreateHeader(header)
}
-func hasValidUTF8(s string) bool {
- n := 0
- for _, r := range s {
- // By default, ZIP uses CP437, which is only identical to ASCII for the printable characters.
- if r < 0x20 || r >= 0x7f {
- if !utf8.ValidRune(r) {
- return false
+// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
+// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
+// or any other common encoding).
+func detectUTF8(s string) (valid, require bool) {
+ for i := 0; i < len(s); {
+ r, size := utf8.DecodeRuneInString(s[i:])
+ i += size
+ // Officially, ZIP uses CP-437, but many readers use the system's
+ // local character encoding. Most encoding are compatible with a large
+ // subset of CP-437, which itself is ASCII-like.
+ //
+ // Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those
+ // characters with localized currency and overline characters.
+ if r < 0x20 || r > 0x7d || r == 0x5c {
+ if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
+ return false, false
}
- n++
+ require = true
}
}
- return n > 0
+ return true, require
}
-// CreateHeader adds a file to the zip file using the provided FileHeader
-// for the file metadata.
-// It returns a Writer to which the file contents should be written.
+// CreateHeader adds a file to the zip archive using the provided FileHeader
+// for the file metadata. Writer takes ownership of fh and may mutate
+// its fields. The caller must not modify fh after calling CreateHeader.
//
+// This returns a Writer to which the file contents should be written.
// The file's contents must be written to the io.Writer before the next
-// call to Create, CreateHeader, or Close. The provided FileHeader fh
-// must not be modified after a call to CreateHeader.
+// call to Create, CreateHeader, or Close.
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
if w.last != nil && !w.last.closed {
if err := w.last.close(); err != nil {
@@ -234,13 +263,62 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
fh.Flags |= 0x8 // we will write a data descriptor
- if hasValidUTF8(fh.Name) || hasValidUTF8(fh.Comment) {
- fh.Flags |= 0x800 // filename or comment have valid utf-8 string
+ // The ZIP format has a sad state of affairs regarding character encoding.
+ // Officially, the name and comment fields are supposed to be encoded
+ // in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
+ // flag bit is set. However, there are several problems:
+ //
+ // * Many ZIP readers still do not support UTF-8.
+ // * If the UTF-8 flag is cleared, several readers simply interpret the
+ // name and comment fields as whatever the local system encoding is.
+ //
+ // In order to avoid breaking readers without UTF-8 support,
+ // we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
+ // However, if the strings require multibyte UTF-8 encoding and is a
+ // valid UTF-8 string, then we set the UTF-8 bit.
+ //
+ // For the case, where the user explicitly wants to specify the encoding
+ // as UTF-8, they will need to set the flag bit themselves.
+ utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
+ utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
+ switch {
+ case fh.NonUTF8:
+ fh.Flags &^= 0x800
+ case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
+ fh.Flags |= 0x800
}
fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
fh.ReaderVersion = zipVersion20
+ // If Modified is set, this takes precedence over MS-DOS timestamp fields.
+ if !fh.Modified.IsZero() {
+ // Contrary to the FileHeader.SetModTime method, we intentionally
+ // do not convert to UTC, because we assume the user intends to encode
+ // the date using the specified timezone. A user may want this control
+ // because many legacy ZIP readers interpret the timestamp according
+ // to the local timezone.
+ //
+ // The timezone is only non-UTC if a user directly sets the Modified
+ // field directly themselves. All other approaches sets UTC.
+ fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)
+
+ // Use "extended timestamp" format since this is what Info-ZIP uses.
+ // Nearly every major ZIP implementation uses a different format,
+ // but at least most seem to be able to understand the other formats.
+ //
+ // This format happens to be identical for both local and central header
+ // if modification time is the only timestamp being encoded.
+ var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
+ mt := uint32(fh.Modified.Unix())
+ eb := writeBuf(mbuf[:])
+ eb.uint16(extTimeExtraID)
+ eb.uint16(5) // Size: SizeOf(uint8) + SizeOf(uint32)
+ eb.uint8(1) // Flags: ModTime
+ eb.uint32(mt) // ModTime
+ fh.Extra = append(fh.Extra, mbuf[:]...)
+ }
+
fw := &fileWriter{
zipw: w.cw,
compCount: &countWriter{w: w.cw},
@@ -273,6 +351,14 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
}
func writeHeader(w io.Writer, h *FileHeader) error {
+ const maxUint16 = 1<<16 - 1
+ if len(h.Name) > maxUint16 {
+ return errLongName
+ }
+ if len(h.Extra) > maxUint16 {
+ return errLongExtra
+ }
+
var buf [fileHeaderLen]byte
b := writeBuf(buf[:])
b.uint32(uint32(fileHeaderSignature))
@@ -402,6 +488,11 @@ func (w nopCloser) Close() error {
type writeBuf []byte
+func (b *writeBuf) uint8(v uint8) {
+ (*b)[0] = v
+ *b = (*b)[1:]
+}
+
func (b *writeBuf) uint16(v uint16) {
binary.LittleEndian.PutUint16(*b, v)
*b = (*b)[2:]
diff --git a/src/archive/zip/writer_test.go b/src/archive/zip/writer_test.go
index 92fb6ecf0ed..38f32296fa8 100644
--- a/src/archive/zip/writer_test.go
+++ b/src/archive/zip/writer_test.go
@@ -6,11 +6,14 @@ package zip
import (
"bytes"
+ "fmt"
"io"
"io/ioutil"
"math/rand"
"os"
+ "strings"
"testing"
+ "time"
)
// TODO(adg): a more sophisticated test suite
@@ -57,8 +60,8 @@ var writeTests = []WriteTest{
func TestWriter(t *testing.T) {
largeData := make([]byte, 1<<17)
- for i := range largeData {
- largeData[i] = byte(rand.Int())
+ if _, err := rand.Read(largeData); err != nil {
+ t.Fatal("rand.Read failed:", err)
}
writeTests[1].Data = largeData
defer func() {
@@ -87,31 +90,100 @@ func TestWriter(t *testing.T) {
}
}
+// TestWriterComment is test for EOCD comment read/write.
+func TestWriterComment(t *testing.T) {
+ var tests = []struct {
+ comment string
+ ok bool
+ }{
+ {"hi, hello", true},
+ {"hi, こんにちわ", true},
+ {strings.Repeat("a", uint16max), true},
+ {strings.Repeat("a", uint16max+1), false},
+ }
+
+ for _, test := range tests {
+ // write a zip file
+ buf := new(bytes.Buffer)
+ w := NewWriter(buf)
+ if err := w.SetComment(test.comment); err != nil {
+ if test.ok {
+ t.Fatalf("SetComment: unexpected error %v", err)
+ }
+ continue
+ } else {
+ if !test.ok {
+ t.Fatalf("SetComment: unexpected success, want error")
+ }
+ }
+
+ if err := w.Close(); test.ok == (err != nil) {
+ t.Fatal(err)
+ }
+
+ if w.closed != test.ok {
+ t.Fatalf("Writer.closed: got %v, want %v", w.closed, test.ok)
+ }
+
+ // skip read test in failure cases
+ if !test.ok {
+ continue
+ }
+
+ // read it back
+ r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if r.Comment != test.comment {
+ t.Fatalf("Reader.Comment: got %v, want %v", r.Comment, test.comment)
+ }
+ }
+}
+
func TestWriterUTF8(t *testing.T) {
var utf8Tests = []struct {
name string
comment string
- expect uint16
+ nonUTF8 bool
+ flags uint16
}{
{
name: "hi, hello",
comment: "in the world",
- expect: 0x8,
+ flags: 0x8,
},
{
name: "hi, こんにちわ",
comment: "in the world",
- expect: 0x808,
+ flags: 0x808,
+ },
+ {
+ name: "hi, こんにちわ",
+ comment: "in the world",
+ nonUTF8: true,
+ flags: 0x8,
},
{
name: "hi, hello",
comment: "in the 世界",
- expect: 0x808,
+ flags: 0x808,
},
{
name: "hi, こんにちわ",
comment: "in the 世界",
- expect: 0x808,
+ flags: 0x808,
+ },
+ {
+ name: "the replacement rune is �",
+ comment: "the replacement rune is �",
+ flags: 0x808,
+ },
+ {
+ // Name is Japanese encoded in Shift JIS.
+ name: "\x93\xfa\x96{\x8c\xea.txt",
+ comment: "in the 世界",
+ flags: 0x008, // UTF-8 must not be set
},
}
@@ -123,6 +195,7 @@ func TestWriterUTF8(t *testing.T) {
h := &FileHeader{
Name: test.name,
Comment: test.comment,
+ NonUTF8: test.nonUTF8,
Method: Deflate,
}
w, err := w.CreateHeader(h)
@@ -142,18 +215,41 @@ func TestWriterUTF8(t *testing.T) {
t.Fatal(err)
}
for i, test := range utf8Tests {
- got := r.File[i].Flags
- t.Logf("name %v, comment %v", test.name, test.comment)
- if got != test.expect {
- t.Fatalf("Flags: got %v, want %v", got, test.expect)
+ flags := r.File[i].Flags
+ if flags != test.flags {
+ t.Errorf("CreateHeader(name=%q comment=%q nonUTF8=%v): flags=%#x, want %#x", test.name, test.comment, test.nonUTF8, flags, test.flags)
}
}
}
+func TestWriterTime(t *testing.T) {
+ var buf bytes.Buffer
+ h := &FileHeader{
+ Name: "test.txt",
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
+ }
+ w := NewWriter(&buf)
+ if _, err := w.CreateHeader(h); err != nil {
+ t.Fatalf("unexpected CreateHeader error: %v", err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("unexpected Close error: %v", err)
+ }
+
+ want, err := ioutil.ReadFile("testdata/time-go.zip")
+ if err != nil {
+ t.Fatalf("unexpected ReadFile error: %v", err)
+ }
+ if got := buf.Bytes(); !bytes.Equal(got, want) {
+ fmt.Printf("%x\n%x\n", got, want)
+ t.Error("contents of time-go.zip differ")
+ }
+}
+
func TestWriterOffset(t *testing.T) {
largeData := make([]byte, 1<<17)
- for i := range largeData {
- largeData[i] = byte(rand.Int())
+ if _, err := rand.Read(largeData); err != nil {
+ t.Fatal("rand.Read failed:", err)
}
writeTests[1].Data = largeData
defer func() {
@@ -225,7 +321,7 @@ func testReadFile(t *testing.T, f *File, wt *WriteTest) {
if f.Name != wt.Name {
t.Fatalf("File name: got %q, want %q", f.Name, wt.Name)
}
- testFileMode(t, wt.Name, f, wt.Mode)
+ testFileMode(t, f, wt.Mode)
rc, err := f.Open()
if err != nil {
t.Fatal("opening:", err)
diff --git a/src/archive/zip/zip_test.go b/src/archive/zip/zip_test.go
index 18c2171ba6c..7e02cb0eeaa 100644
--- a/src/archive/zip/zip_test.go
+++ b/src/archive/zip/zip_test.go
@@ -645,16 +645,54 @@ func TestHeaderTooShort(t *testing.T) {
h := FileHeader{
Name: "foo.txt",
Method: Deflate,
- Extra: []byte{zip64ExtraId}, // missing size and second half of tag, but Extra is best-effort parsing
+ Extra: []byte{zip64ExtraID}, // missing size and second half of tag, but Extra is best-effort parsing
}
testValidHeader(&h, t)
}
+func TestHeaderTooLongErr(t *testing.T) {
+ var headerTests = []struct {
+ name string
+ extra []byte
+ wanterr error
+ }{
+ {
+ name: strings.Repeat("x", 1<<16),
+ extra: []byte{},
+ wanterr: errLongName,
+ },
+ {
+ name: "long_extra",
+ extra: bytes.Repeat([]byte{0xff}, 1<<16),
+ wanterr: errLongExtra,
+ },
+ }
+
+ // write a zip file
+ buf := new(bytes.Buffer)
+ w := NewWriter(buf)
+
+ for _, test := range headerTests {
+ h := &FileHeader{
+ Name: test.name,
+ Extra: test.extra,
+ }
+ _, err := w.CreateHeader(h)
+ if err != test.wanterr {
+ t.Errorf("error=%v, want %v", err, test.wanterr)
+ }
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+}
+
func TestHeaderIgnoredSize(t *testing.T) {
h := FileHeader{
Name: "foo.txt",
Method: Deflate,
- Extra: []byte{zip64ExtraId & 0xFF, zip64ExtraId >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted
+ Extra: []byte{zip64ExtraID & 0xFF, zip64ExtraID >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted
}
testValidHeader(&h, t)
}
diff --git a/src/bootstrap.bash b/src/bootstrap.bash
index da3dff461f4..7b4f57461fc 100755
--- a/src/bootstrap.bash
+++ b/src/bootstrap.bash
@@ -14,6 +14,15 @@
#
# Only changes that have been committed to Git (at least locally,
# not necessary reviewed and submitted to master) are included in the tree.
+#
+# As a special case for Go's internal use only, if the
+# BOOTSTRAP_FORMAT environment variable is set to "mintgz", the
+# resulting archive is intended for use by the Go build system and
+# differs in that the mintgz file:
+# * is a tar.gz file instead of bz2
+# * has many unnecessary files deleted to reduce its size
+# * does not have a shared directory component for each tar entry
+# Do not depend on the mintgz format.
set -e
@@ -28,6 +37,11 @@ if [ -e $targ ]; then
exit 2
fi
+if [ "$BOOTSTRAP_FORMAT" != "mintgz" -a "$BOOTSTRAP_FORMAT" != "" ]; then
+ echo "unknown BOOTSTRAP_FORMAT format"
+ exit 2
+fi
+
unset GOROOT
src=$(cd .. && pwd)
echo "#### Copying to $targ"
@@ -62,8 +76,36 @@ else
rmdir bin/*_*
rm -rf "pkg/${gohostos}_${gohostarch}" "pkg/tool/${gohostos}_${gohostarch}"
fi
+
+GITREV=$(git rev-parse --short HEAD)
rm -rf pkg/bootstrap pkg/obj .git
+# Support for building minimal tar.gz for the builders.
+# The build system doesn't support bzip2, and by deleting more stuff,
+# they start faster, especially on machines without fast filesystems
+# and things like tmpfs configures.
+# Do not depend on this format. It's for internal use only.
+if [ "$BOOTSTRAP_FORMAT" = "mintgz" ]; then
+ OUTGZ="gobootstrap-${GOOS}-${GOARCH}-${GITREV}.tar.gz"
+ echo "Preparing to generate build system's ${OUTGZ}; cleaning ..."
+ rm -rf bin/gofmt
+ rm -rf src/runtime/race/race_*.syso
+ rm -rf api test doc misc/cgo/test misc/trace
+ rm -rf pkg/tool/*_*/{addr2line,api,cgo,cover,doc,fix,nm,objdump,pack,pprof,test2json,trace,vet}
+ rm -rf pkg/*_*/{image,database,cmd}
+ rm -rf $(find . -type d -name testdata)
+ find . -type f -name '*_test.go' -exec rm {} \;
+ # git clean doesn't clean symlinks apparently, and the buildlet
+ # rejects them, so:
+ find . -type l -exec rm {} \;
+
+ echo "Writing ${OUTGZ} ..."
+ tar cf - . | gzip -9 > ../$OUTGZ
+ cd ..
+ ls -l "$(pwd)/$OUTGZ"
+ exit 0
+fi
+
echo ----
echo Bootstrap toolchain for "$GOOS/$GOARCH" installed in "$(pwd)".
echo Building tbz.
diff --git a/src/bufio/bufio.go b/src/bufio/bufio.go
index da94a2503f0..ad9c9f5ddf7 100644
--- a/src/bufio/bufio.go
+++ b/src/bufio/bufio.go
@@ -62,6 +62,9 @@ func NewReader(rd io.Reader) *Reader {
return NewReaderSize(rd, defaultBufSize)
}
+// Size returns the size of the underlying buffer in bytes.
+func (r *Reader) Size() int { return len(r.buf) }
+
// Reset discards any buffered data, resets all state, and switches
// the buffered reader to read from r.
func (b *Reader) Reset(r io.Reader) {
@@ -548,6 +551,9 @@ func NewWriter(w io.Writer) *Writer {
return NewWriterSize(w, defaultBufSize)
}
+// Size returns the size of the underlying buffer in bytes.
+func (b *Writer) Size() int { return len(b.buf) }
+
// Reset discards any unflushed buffered data, clears any error, and
// resets b to write its output to w.
func (b *Writer) Reset(w io.Writer) {
diff --git a/src/bufio/bufio_test.go b/src/bufio/bufio_test.go
index ef0f6c834e8..c829d2b0648 100644
--- a/src/bufio/bufio_test.go
+++ b/src/bufio/bufio_test.go
@@ -1418,6 +1418,24 @@ func TestReaderDiscard(t *testing.T) {
}
+func TestReaderSize(t *testing.T) {
+ if got, want := NewReader(nil).Size(), DefaultBufSize; got != want {
+ t.Errorf("NewReader's Reader.Size = %d; want %d", got, want)
+ }
+ if got, want := NewReaderSize(nil, 1234).Size(), 1234; got != want {
+ t.Errorf("NewReaderSize's Reader.Size = %d; want %d", got, want)
+ }
+}
+
+func TestWriterSize(t *testing.T) {
+ if got, want := NewWriter(nil).Size(), DefaultBufSize; got != want {
+ t.Errorf("NewWriter's Writer.Size = %d; want %d", got, want)
+ }
+ if got, want := NewWriterSize(nil, 1234).Size(), 1234; got != want {
+ t.Errorf("NewWriterSize's Writer.Size = %d; want %d", got, want)
+ }
+}
+
// An onlyReader only implements io.Reader, no matter what other methods the underlying implementation may have.
type onlyReader struct {
io.Reader
diff --git a/src/bufio/export_test.go b/src/bufio/export_test.go
index 3d3bb27d8da..1667f01a841 100644
--- a/src/bufio/export_test.go
+++ b/src/bufio/export_test.go
@@ -11,6 +11,8 @@ import (
var IsSpace = isSpace
+const DefaultBufSize = defaultBufSize
+
func (s *Scanner) MaxTokenSize(n int) {
if n < utf8.UTFMax || n > 1e9 {
panic("bad max token size")
diff --git a/src/bufio/scan.go b/src/bufio/scan.go
index 9f741c98307..40aaa4ab817 100644
--- a/src/bufio/scan.go
+++ b/src/bufio/scan.go
@@ -123,8 +123,9 @@ var ErrFinalToken = errors.New("final token")
// After Scan returns false, the Err method will return any error that
// occurred during scanning, except that if it was io.EOF, Err
// will return nil.
-// Scan panics if the split function returns 100 empty tokens without
-// advancing the input. This is a common error mode for scanners.
+// Scan panics if the split function returns too many empty
+// tokens without advancing the input. This is a common error mode for
+// scanners.
func (s *Scanner) Scan() bool {
if s.done {
return false
@@ -156,8 +157,8 @@ func (s *Scanner) Scan() bool {
} else {
// Returning tokens not advancing input at EOF.
s.empties++
- if s.empties > 100 {
- panic("bufio.Scan: 100 empty tokens without progressing")
+ if s.empties > maxConsecutiveEmptyReads {
+ panic("bufio.Scan: too many empty tokens without progressing")
}
}
return true
diff --git a/src/builtin/builtin.go b/src/builtin/builtin.go
index 1c7c041d680..4578c855a9e 100644
--- a/src/builtin/builtin.go
+++ b/src/builtin/builtin.go
@@ -171,8 +171,9 @@ func cap(v Type) int
// Slice: The size specifies the length. The capacity of the slice is
// equal to its length. A second integer argument may be provided to
// specify a different capacity; it must be no smaller than the
-// length, so make([]int, 0, 10) allocates a slice of length 0 and
-// capacity 10.
+// length. For example, make([]int, 0, 10) allocates an underlying array
+// of size 10 and returns a slice of length 0 and capacity 10 that is
+// backed by this underlying array.
// Map: An empty map is allocated with enough space to hold the
// specified number of elements. The size may be omitted, in which case
// a small starting size is allocated.
diff --git a/src/bytes/boundary_test.go b/src/bytes/boundary_test.go
new file mode 100644
index 00000000000..ea84f1e40fd
--- /dev/null
+++ b/src/bytes/boundary_test.go
@@ -0,0 +1,84 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build linux
+
+package bytes_test
+
+import (
+ . "bytes"
+ "syscall"
+ "testing"
+)
+
+// This file tests the situation where byte operations are checking
+// data very near to a page boundary. We want to make sure those
+// operations do not read across the boundary and cause a page
+// fault where they shouldn't.
+
+// These tests run only on linux. The code being tested is
+// not OS-specific, so it does not need to be tested on all
+// operating systems.
+
+// dangerousSlice returns a slice which is immediately
+// preceded and followed by a faulting page.
+func dangerousSlice(t *testing.T) []byte {
+ pagesize := syscall.Getpagesize()
+ b, err := syscall.Mmap(0, 0, 3*pagesize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE)
+ if err != nil {
+ t.Fatalf("mmap failed %s", err)
+ }
+ err = syscall.Mprotect(b[:pagesize], syscall.PROT_NONE)
+ if err != nil {
+ t.Fatalf("mprotect low failed %s\n", err)
+ }
+ err = syscall.Mprotect(b[2*pagesize:], syscall.PROT_NONE)
+ if err != nil {
+ t.Fatalf("mprotect high failed %s\n", err)
+ }
+ return b[pagesize : 2*pagesize]
+}
+
+func TestEqualNearPageBoundary(t *testing.T) {
+ t.Parallel()
+ b := dangerousSlice(t)
+ for i := range b {
+ b[i] = 'A'
+ }
+ for i := 0; i <= len(b); i++ {
+ Equal(b[:i], b[len(b)-i:])
+ Equal(b[len(b)-i:], b[:i])
+ }
+}
+
+func TestIndexByteNearPageBoundary(t *testing.T) {
+ t.Parallel()
+ b := dangerousSlice(t)
+ for i := range b {
+ idx := IndexByte(b[i:], 1)
+ if idx != -1 {
+ t.Fatalf("IndexByte(b[%d:])=%d, want -1\n", i, idx)
+ }
+ }
+}
+
+func TestIndexNearPageBoundary(t *testing.T) {
+ t.Parallel()
+ var q [64]byte
+ b := dangerousSlice(t)
+ if len(b) > 256 {
+ // Only worry about when we're near the end of a page.
+ b = b[len(b)-256:]
+ }
+ for j := 1; j < len(q); j++ {
+ q[j-1] = 1 // difference is only found on the last byte
+ for i := range b {
+ idx := Index(b[i:], q[:j])
+ if idx != -1 {
+ t.Fatalf("Index(b[%d:], q[:%d])=%d, want -1\n", i, j, idx)
+ }
+ }
+ q[j-1] = 0
+ }
+}
diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go
index 20e42bbbbca..dc9d5e95d32 100644
--- a/src/bytes/buffer.go
+++ b/src/bytes/buffer.go
@@ -15,34 +15,37 @@ import (
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
- buf []byte // contents are the bytes buf[off : len(buf)]
- off int // read at &buf[off], write at &buf[len(buf)]
- lastRead readOp // last read operation, so that Unread* can work correctly.
- // FIXME: lastRead can fit in a single byte
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ bootstrap [64]byte // memory to hold first slice; helps small buffers avoid allocation.
+ lastRead readOp // last read operation, so that Unread* can work correctly.
- // memory to hold first slice; helps small buffers avoid allocation.
// FIXME: it would be advisable to align Buffer to cachelines to avoid false
// sharing.
- bootstrap [64]byte
}
// The readOp constants describe the last action performed on
// the buffer, so that UnreadRune and UnreadByte can check for
// invalid usage. opReadRuneX constants are chosen such that
// converted to int they correspond to the rune size that was read.
-type readOp int
+type readOp int8
+// Don't use iota for these, as the values need to correspond with the
+// names and comments, which is easier to see when being explicit.
const (
opRead readOp = -1 // Any other read operation.
- opInvalid = 0 // Non-read operation.
- opReadRune1 = 1 // Read rune of size 1.
- opReadRune2 = 2 // Read rune of size 2.
- opReadRune3 = 3 // Read rune of size 3.
- opReadRune4 = 4 // Read rune of size 4.
+ opInvalid readOp = 0 // Non-read operation.
+ opReadRune1 readOp = 1 // Read rune of size 1.
+ opReadRune2 readOp = 2 // Read rune of size 2.
+ opReadRune3 readOp = 3 // Read rune of size 3.
+ opReadRune4 readOp = 4 // Read rune of size 4.
)
// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
var ErrTooLarge = errors.New("bytes.Buffer: too large")
+var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read")
+
+const maxInt = int(^uint(0) >> 1)
// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
// The slice is valid for use only until the next buffer modification (that is,
@@ -53,6 +56,8 @@ func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "".
+//
+// To build strings more efficiently, see the strings.Builder type.
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
@@ -61,6 +66,9 @@ func (b *Buffer) String() string {
return string(b.buf[b.off:])
}
+// empty returns whether the unread portion of the buffer is empty.
+func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
+
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
@@ -81,7 +89,7 @@ func (b *Buffer) Truncate(n int) {
if n < 0 || n > b.Len() {
panic("bytes.Buffer: truncation out of range")
}
- b.buf = b.buf[0 : b.off+n]
+ b.buf = b.buf[:b.off+n]
}
// Reset resets the buffer to be empty,
@@ -97,7 +105,7 @@ func (b *Buffer) Reset() {
// internal buffer only needs to be resliced.
// It returns the index where bytes should be written and whether it succeeded.
func (b *Buffer) tryGrowByReslice(n int) (int, bool) {
- if l := len(b.buf); l+n <= cap(b.buf) {
+ if l := len(b.buf); n <= cap(b.buf)-l {
b.buf = b.buf[:l+n]
return l, true
}
@@ -122,15 +130,18 @@ func (b *Buffer) grow(n int) int {
b.buf = b.bootstrap[:n]
return 0
}
- if m+n <= cap(b.buf)/2 {
+ c := cap(b.buf)
+ if n <= c/2-m {
// We can slide things down instead of allocating a new
- // slice. We only need m+n <= cap(b.buf) to slide, but
+ // slice. We only need m+n <= c to slide, but
// we instead let capacity get twice as large so we
// don't spend all our time copying.
- copy(b.buf[:], b.buf[b.off:])
+ copy(b.buf, b.buf[b.off:])
+ } else if c > maxInt-c-n {
+ panic(ErrTooLarge)
} else {
// Not enough space anywhere, we need to allocate.
- buf := makeSlice(2*cap(b.buf) + n)
+ buf := makeSlice(2*c + n)
copy(buf, b.buf[b.off:])
b.buf = buf
}
@@ -150,7 +161,7 @@ func (b *Buffer) Grow(n int) {
panic("bytes.Buffer.Grow: negative count")
}
m := b.grow(n)
- b.buf = b.buf[0:m]
+ b.buf = b.buf[:m]
}
// Write appends the contents of p to the buffer, growing the buffer as
@@ -189,34 +200,22 @@ const MinRead = 512
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.lastRead = opInvalid
- // If buffer is empty, reset to recover space.
- if b.off >= len(b.buf) {
- b.Reset()
- }
for {
- if free := cap(b.buf) - len(b.buf); free < MinRead {
- // not enough space at end
- newBuf := b.buf
- if b.off+free < MinRead {
- // not enough space using beginning of buffer;
- // double buffer capacity
- newBuf = makeSlice(2*cap(b.buf) + MinRead)
- }
- copy(newBuf, b.buf[b.off:])
- b.buf = newBuf[:len(b.buf)-b.off]
- b.off = 0
+ i := b.grow(MinRead)
+ m, e := r.Read(b.buf[i:cap(b.buf)])
+ if m < 0 {
+ panic(errNegativeRead)
}
- m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
- b.buf = b.buf[0 : len(b.buf)+m]
+
+ b.buf = b.buf[:i+m]
n += int64(m)
if e == io.EOF {
- break
+ return n, nil // e is EOF, so return nil explicitly
}
if e != nil {
return n, e
}
}
- return n, nil // err is EOF, so return nil explicitly
}
// makeSlice allocates a slice of size n. If the allocation fails, it panics
@@ -237,8 +236,7 @@ func makeSlice(n int) []byte {
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
b.lastRead = opInvalid
- if b.off < len(b.buf) {
- nBytes := b.Len()
+ if nBytes := b.Len(); nBytes > 0 {
m, e := w.Write(b.buf[b.off:])
if m > nBytes {
panic("bytes.Buffer.WriteTo: invalid Write count")
@@ -256,7 +254,7 @@ func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
}
// Buffer is now empty; reset.
b.Reset()
- return
+ return n, nil
}
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
@@ -298,11 +296,11 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) {
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
- if b.off >= len(b.buf) {
+ if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
if len(p) == 0 {
- return
+ return 0, nil
}
return 0, io.EOF
}
@@ -311,7 +309,7 @@ func (b *Buffer) Read(p []byte) (n int, err error) {
if n > 0 {
b.lastRead = opRead
}
- return
+ return n, nil
}
// Next returns a slice containing the next n bytes from the buffer,
@@ -335,8 +333,7 @@ func (b *Buffer) Next(n int) []byte {
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (byte, error) {
- b.lastRead = opInvalid
- if b.off >= len(b.buf) {
+ if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
return 0, io.EOF
@@ -353,8 +350,7 @@ func (b *Buffer) ReadByte() (byte, error) {
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (r rune, size int, err error) {
- b.lastRead = opInvalid
- if b.off >= len(b.buf) {
+ if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
return 0, 0, io.EOF
@@ -413,7 +409,7 @@ func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
// return a copy of slice. The buffer's backing array may
// be overwritten by later calls.
line = append(line, slice...)
- return
+ return line, err
}
// readSlice is like ReadBytes but returns a reference to internal buffer data.
diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go
index ce2f01a0ad3..e4bbc12f6a1 100644
--- a/src/bytes/buffer_test.go
+++ b/src/bytes/buffer_test.go
@@ -6,25 +6,27 @@ package bytes_test
import (
. "bytes"
- "internal/testenv"
"io"
"math/rand"
- "os/exec"
"runtime"
"testing"
"unicode/utf8"
)
-const N = 10000 // make this bigger for a larger (and slower) test
-var data string // test data for write tests
-var testBytes []byte // test data; same as data but as a slice.
+const N = 10000 // make this bigger for a larger (and slower) test
+var testString string // test data for write tests
+var testBytes []byte // test data; same as testString but as a slice.
+
+type negativeReader struct{}
+
+func (r *negativeReader) Read([]byte) (int, error) { return -1, nil }
func init() {
testBytes = make([]byte, N)
for i := 0; i < N; i++ {
testBytes[i] = 'a' + byte(i%26)
}
- data = string(testBytes)
+ testString = string(testBytes)
}
// Verify that contents of buf match the string s.
@@ -88,12 +90,12 @@ func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub
func TestNewBuffer(t *testing.T) {
buf := NewBuffer(testBytes)
- check(t, "NewBuffer", buf, data)
+ check(t, "NewBuffer", buf, testString)
}
func TestNewBufferString(t *testing.T) {
- buf := NewBufferString(data)
- check(t, "NewBufferString", buf, data)
+ buf := NewBufferString(testString)
+ check(t, "NewBufferString", buf, testString)
}
// Empty buf through repeated reads into fub.
@@ -128,7 +130,7 @@ func TestBasicOperations(t *testing.T) {
buf.Truncate(0)
check(t, "TestBasicOperations (3)", &buf, "")
- n, err := buf.Write([]byte(data[0:1]))
+ n, err := buf.Write(testBytes[0:1])
if n != 1 {
t.Errorf("wrote 1 byte, but n == %d", n)
}
@@ -137,30 +139,30 @@ func TestBasicOperations(t *testing.T) {
}
check(t, "TestBasicOperations (4)", &buf, "a")
- buf.WriteByte(data[1])
+ buf.WriteByte(testString[1])
check(t, "TestBasicOperations (5)", &buf, "ab")
- n, err = buf.Write([]byte(data[2:26]))
+ n, err = buf.Write(testBytes[2:26])
if n != 24 {
- t.Errorf("wrote 25 bytes, but n == %d", n)
+ t.Errorf("wrote 24 bytes, but n == %d", n)
}
- check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
+ check(t, "TestBasicOperations (6)", &buf, testString[0:26])
buf.Truncate(26)
- check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
+ check(t, "TestBasicOperations (7)", &buf, testString[0:26])
buf.Truncate(20)
- check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
+ check(t, "TestBasicOperations (8)", &buf, testString[0:20])
- empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
+ empty(t, "TestBasicOperations (9)", &buf, testString[0:20], make([]byte, 5))
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
- buf.WriteByte(data[1])
+ buf.WriteByte(testString[1])
c, err := buf.ReadByte()
if err != nil {
t.Error("ReadByte unexpected eof")
}
- if c != data[1] {
+ if c != testString[1] {
t.Errorf("ReadByte wrong value c=%v", c)
}
c, err = buf.ReadByte()
@@ -177,8 +179,8 @@ func TestLargeStringWrites(t *testing.T) {
limit = 9
}
for i := 3; i < limit; i += 3 {
- s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data)
- empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i))
+ s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, testString)
+ empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(testString)/i))
}
check(t, "TestLargeStringWrites (3)", &buf, "")
}
@@ -191,7 +193,7 @@ func TestLargeByteWrites(t *testing.T) {
}
for i := 3; i < limit; i += 3 {
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
- empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
+ empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(testString)/i))
}
check(t, "TestLargeByteWrites (3)", &buf, "")
}
@@ -199,8 +201,8 @@ func TestLargeByteWrites(t *testing.T) {
func TestLargeStringReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
- s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i])
- empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+ s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[0:len(testString)/i])
+ empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
}
check(t, "TestLargeStringReads (3)", &buf, "")
}
@@ -209,7 +211,7 @@ func TestLargeByteReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
- empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+ empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
}
check(t, "TestLargeByteReads (3)", &buf, "")
}
@@ -218,14 +220,14 @@ func TestMixedReadsAndWrites(t *testing.T) {
var buf Buffer
s := ""
for i := 0; i < 50; i++ {
- wlen := rand.Intn(len(data))
+ wlen := rand.Intn(len(testString))
if i%2 == 0 {
- s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen])
+ s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testString[0:wlen])
} else {
s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
}
- rlen := rand.Intn(len(data))
+ rlen := rand.Intn(len(testString))
fub := make([]byte, rlen)
n, _ := buf.Read(fub)
s = s[n:]
@@ -263,17 +265,37 @@ func TestReadFrom(t *testing.T) {
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
b.ReadFrom(&buf)
- empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
+ empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString)))
}
}
+func TestReadFromNegativeReader(t *testing.T) {
+ var b Buffer
+ defer func() {
+ switch err := recover().(type) {
+ case nil:
+ t.Fatal("bytes.Buffer.ReadFrom didn't panic")
+ case error:
+ // this is the error string of errNegativeRead
+ wantError := "bytes.Buffer: reader returned negative count from Read"
+ if err.Error() != wantError {
+ t.Fatalf("recovered panic: got %v, want %v", err.Error(), wantError)
+ }
+ default:
+ t.Fatalf("unexpected panic value: %#v", err)
+ }
+ }()
+
+ b.ReadFrom(new(negativeReader))
+}
+
func TestWriteTo(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
buf.WriteTo(&b)
- empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
+ empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString)))
}
}
@@ -473,6 +495,18 @@ func TestGrow(t *testing.T) {
}
}
+func TestGrowOverflow(t *testing.T) {
+ defer func() {
+ if err := recover(); err != ErrTooLarge {
+ t.Errorf("after too-large Grow, recover() = %v; want %v", err, ErrTooLarge)
+ }
+ }()
+
+ buf := NewBuffer(make([]byte, 1))
+ const maxInt = int(^uint(0) >> 1)
+ buf.Grow(maxInt)
+}
+
// Was a bug: used to give EOF reading empty slice at EOF.
func TestReadEmptyAtEOF(t *testing.T) {
b := new(Buffer)
@@ -548,26 +582,6 @@ func TestBufferGrowth(t *testing.T) {
}
}
-// Test that tryGrowByReslice is inlined.
-// Only execute on "linux-amd64" builder in order to avoid breakage.
-func TestTryGrowByResliceInlined(t *testing.T) {
- targetBuilder := "linux-amd64"
- if testenv.Builder() != targetBuilder {
- t.Skipf("%q gets executed on %q builder only", t.Name(), targetBuilder)
- }
- t.Parallel()
- goBin := testenv.GoToolPath(t)
- out, err := exec.Command(goBin, "tool", "nm", goBin).CombinedOutput()
- if err != nil {
- t.Fatalf("go tool nm: %v: %s", err, out)
- }
- // Verify this doesn't exist:
- sym := "bytes.(*Buffer).tryGrowByReslice"
- if Contains(out, []byte(sym)) {
- t.Errorf("found symbol %q in cmd/go, but should be inlined", sym)
- }
-}
-
func BenchmarkWriteByte(b *testing.B) {
const n = 4 << 10
b.SetBytes(n)
diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
index 7c878af688c..9af177fa882 100644
--- a/src/bytes/bytes.go
+++ b/src/bytes/bytes.go
@@ -39,7 +39,7 @@ func explode(s []byte, n int) [][]byte {
break
}
_, size = utf8.DecodeRune(s)
- a[na] = s[0:size]
+ a[na] = s[0:size:size]
s = s[size:]
na++
}
@@ -68,12 +68,12 @@ func Contains(b, subslice []byte) bool {
return Index(b, subslice) != -1
}
-// ContainsAny reports whether any of the UTF-8-encoded Unicode code points in chars are within b.
+// ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b.
func ContainsAny(b []byte, chars string) bool {
return IndexAny(b, chars) >= 0
}
-// ContainsRune reports whether the Unicode code point r is within b.
+// ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b.
func ContainsRune(b []byte, r rune) bool {
return IndexRune(b, r) >= 0
}
@@ -112,7 +112,7 @@ func LastIndexByte(s []byte, c byte) int {
return -1
}
-// IndexRune interprets s as a sequence of UTF-8-encoded Unicode code points.
+// IndexRune interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index of the first occurrence in s of the given rune.
// It returns -1 if rune is not present in s.
// If r is utf8.RuneError, it returns the first instance of any
@@ -144,30 +144,32 @@ func IndexRune(s []byte, r rune) int {
// code points in chars. It returns -1 if chars is empty or if there is no code
// point in common.
func IndexAny(s []byte, chars string) int {
- if len(chars) > 0 {
- if len(s) > 8 {
- if as, isASCII := makeASCIISet(chars); isASCII {
- for i, c := range s {
- if as.contains(c) {
- return i
- }
- }
- return -1
- }
- }
- var width int
- for i := 0; i < len(s); i += width {
- r := rune(s[i])
- if r < utf8.RuneSelf {
- width = 1
- } else {
- r, width = utf8.DecodeRune(s[i:])
- }
- for _, ch := range chars {
- if r == ch {
+ if chars == "" {
+ // Avoid scanning all of s.
+ return -1
+ }
+ if len(s) > 8 {
+ if as, isASCII := makeASCIISet(chars); isASCII {
+ for i, c := range s {
+ if as.contains(c) {
return i
}
}
+ return -1
+ }
+ }
+ var width int
+ for i := 0; i < len(s); i += width {
+ r := rune(s[i])
+ if r < utf8.RuneSelf {
+ width = 1
+ } else {
+ r, width = utf8.DecodeRune(s[i:])
+ }
+ for _, ch := range chars {
+ if r == ch {
+ return i
+ }
}
}
return -1
@@ -178,25 +180,27 @@ func IndexAny(s []byte, chars string) int {
// the Unicode code points in chars. It returns -1 if chars is empty or if
// there is no code point in common.
func LastIndexAny(s []byte, chars string) int {
- if len(chars) > 0 {
- if len(s) > 8 {
- if as, isASCII := makeASCIISet(chars); isASCII {
- for i := len(s) - 1; i >= 0; i-- {
- if as.contains(s[i]) {
- return i
- }
- }
- return -1
- }
- }
- for i := len(s); i > 0; {
- r, size := utf8.DecodeLastRune(s[:i])
- i -= size
- for _, c := range chars {
- if r == c {
+ if chars == "" {
+ // Avoid scanning all of s.
+ return -1
+ }
+ if len(s) > 8 {
+ if as, isASCII := makeASCIISet(chars); isASCII {
+ for i := len(s) - 1; i >= 0; i-- {
+ if as.contains(s[i]) {
return i
}
}
+ return -1
+ }
+ }
+ for i := len(s); i > 0; {
+ r, size := utf8.DecodeLastRune(s[:i])
+ i -= size
+ for _, c := range chars {
+ if r == c {
+ return i
+ }
}
}
return -1
@@ -223,7 +227,7 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte {
if m < 0 {
break
}
- a[i] = s[:m+sepSave]
+ a[i] = s[: m+sepSave : m+sepSave]
s = s[m+len(sep):]
i++
}
@@ -265,52 +269,112 @@ func SplitAfter(s, sep []byte) [][]byte {
return genSplit(s, sep, len(sep), -1)
}
-// Fields splits the slice s around each instance of one or more consecutive white space
-// characters, returning a slice of subslices of s or an empty list if s contains only white space.
+var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
+
+// Fields interprets s as a sequence of UTF-8-encoded code points.
+// It splits the slice s around each instance of one or more consecutive white space
+// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
+// empty slice if s contains only white space.
func Fields(s []byte) [][]byte {
- return FieldsFunc(s, unicode.IsSpace)
+ // First count the fields.
+ // This is an exact count if s is ASCII, otherwise it is an approximation.
+ n := 0
+ wasSpace := 1
+ // setBits is used to track which bits are set in the bytes of s.
+ setBits := uint8(0)
+ for i := 0; i < len(s); i++ {
+ r := s[i]
+ setBits |= r
+ isSpace := int(asciiSpace[r])
+ n += wasSpace & ^isSpace
+ wasSpace = isSpace
+ }
+
+ if setBits >= utf8.RuneSelf {
+ // Some runes in the input slice are not ASCII.
+ return FieldsFunc(s, unicode.IsSpace)
+ }
+
+ // ASCII fast path
+ a := make([][]byte, n)
+ na := 0
+ fieldStart := 0
+ i := 0
+ // Skip spaces in the front of the input.
+ for i < len(s) && asciiSpace[s[i]] != 0 {
+ i++
+ }
+ fieldStart = i
+ for i < len(s) {
+ if asciiSpace[s[i]] == 0 {
+ i++
+ continue
+ }
+ a[na] = s[fieldStart:i:i]
+ na++
+ i++
+ // Skip spaces in between fields.
+ for i < len(s) && asciiSpace[s[i]] != 0 {
+ i++
+ }
+ fieldStart = i
+ }
+ if fieldStart < len(s) { // Last field might end at EOF.
+ a[na] = s[fieldStart:len(s):len(s)]
+ }
+ return a
}
-// FieldsFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
+// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s at each run of code points c satisfying f(c) and
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
// len(s) == 0, an empty slice is returned.
// FieldsFunc makes no guarantees about the order in which it calls f(c).
// If f does not return consistent results for a given c, FieldsFunc may crash.
func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
- n := 0
- inField := false
+ // A span is used to record a slice of s of the form s[start:end].
+ // The start index is inclusive and the end index is exclusive.
+ type span struct {
+ start int
+ end int
+ }
+ spans := make([]span, 0, 32)
+
+ // Find the field start and end indices.
+ wasField := false
+ fromIndex := 0
for i := 0; i < len(s); {
- r, size := utf8.DecodeRune(s[i:])
- wasInField := inField
- inField = !f(r)
- if inField && !wasInField {
- n++
+ size := 1
+ r := rune(s[i])
+ if r >= utf8.RuneSelf {
+ r, size = utf8.DecodeRune(s[i:])
+ }
+ if f(r) {
+ if wasField {
+ spans = append(spans, span{start: fromIndex, end: i})
+ wasField = false
+ }
+ } else {
+ if !wasField {
+ fromIndex = i
+ wasField = true
+ }
}
i += size
}
- a := make([][]byte, n)
- na := 0
- fieldStart := -1
- for i := 0; i <= len(s) && na < n; {
- r, size := utf8.DecodeRune(s[i:])
- if fieldStart < 0 && size > 0 && !f(r) {
- fieldStart = i
- i += size
- continue
- }
- if fieldStart >= 0 && (size == 0 || f(r)) {
- a[na] = s[fieldStart:i]
- na++
- fieldStart = -1
- }
- if size == 0 {
- break
- }
- i += size
+ // Last field might end at EOF.
+ if wasField {
+ spans = append(spans, span{fromIndex, len(s)})
}
- return a[0:na]
+
+ // Create subslices from recorded field indices.
+ a := make([][]byte, len(spans))
+ for i, span := range spans {
+ a[i] = s[span.start:span.end:span.end]
+ }
+
+ return a
}
// Join concatenates the elements of s to create a new byte slice. The separator
@@ -349,8 +413,8 @@ func HasSuffix(s, suffix []byte) bool {
// Map returns a copy of the byte slice s with all its characters modified
// according to the mapping function. If mapping returns a negative value, the character is
-// dropped from the string with no replacement. The characters in s and the
-// output are interpreted as UTF-8-encoded Unicode code points.
+// dropped from the byte slice with no replacement. The characters in s and the
+// output are interpreted as UTF-8-encoded code points.
func Map(mapping func(r rune) rune, s []byte) []byte {
// In the worst case, the slice can grow when mapped, making
// things unpleasant. But it's so rare we barge in assuming it's
@@ -408,28 +472,28 @@ func Repeat(b []byte, count int) []byte {
return nb
}
-// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to their upper case.
+// ToUpper treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters within it mapped to their upper case.
func ToUpper(s []byte) []byte { return Map(unicode.ToUpper, s) }
-// ToLower returns a copy of the byte slice s with all Unicode letters mapped to their lower case.
+// ToLower treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their lower case.
func ToLower(s []byte) []byte { return Map(unicode.ToLower, s) }
-// ToTitle returns a copy of the byte slice s with all Unicode letters mapped to their title case.
+// ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case.
func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }
-// ToUpperSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
+// ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// upper case, giving priority to the special casing rules.
func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return c.ToUpper(r) }, s)
}
-// ToLowerSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
+// ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// lower case, giving priority to the special casing rules.
func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return c.ToLower(r) }, s)
}
-// ToTitleSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
+// ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// title case, giving priority to the special casing rules.
func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return c.ToTitle(r) }, s)
@@ -460,8 +524,8 @@ func isSeparator(r rune) bool {
return unicode.IsSpace(r)
}
-// Title returns a copy of s with all Unicode letters that begin words
-// mapped to their title case.
+// Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin
+// words mapped to their title case.
//
// BUG(rsc): The rule Title uses for word boundaries does not handle Unicode punctuation properly.
func Title(s []byte) []byte {
@@ -481,8 +545,8 @@ func Title(s []byte) []byte {
s)
}
-// TrimLeftFunc returns a subslice of s by slicing off all leading UTF-8-encoded
-// Unicode code points c that satisfy f(c).
+// TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off
+// all leading UTF-8-encoded code points c that satisfy f(c).
func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
i := indexFunc(s, f, false)
if i == -1 {
@@ -491,8 +555,8 @@ func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
return s[i:]
}
-// TrimRightFunc returns a subslice of s by slicing off all trailing UTF-8
-// encoded Unicode code points c that satisfy f(c).
+// TrimRightFunc returns a subslice of s by slicing off all trailing
+// UTF-8-encoded code points c that satisfy f(c).
func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
i := lastIndexFunc(s, f, false)
if i >= 0 && s[i] >= utf8.RuneSelf {
@@ -505,7 +569,7 @@ func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
}
// TrimFunc returns a subslice of s by slicing off all leading and trailing
-// UTF-8-encoded Unicode code points c that satisfy f(c).
+// UTF-8-encoded code points c that satisfy f(c).
func TrimFunc(s []byte, f func(r rune) bool) []byte {
return TrimRightFunc(TrimLeftFunc(s, f), f)
}
@@ -528,14 +592,14 @@ func TrimSuffix(s, suffix []byte) []byte {
return s
}
-// IndexFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
+// IndexFunc interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index in s of the first Unicode
// code point satisfying f(c), or -1 if none do.
func IndexFunc(s []byte, f func(r rune) bool) int {
return indexFunc(s, f, true)
}
-// LastIndexFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
+// LastIndexFunc interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index in s of the last Unicode
// code point satisfying f(c), or -1 if none do.
func LastIndexFunc(s []byte, f func(r rune) bool) int {
@@ -626,19 +690,19 @@ func makeCutsetFunc(cutset string) func(r rune) bool {
}
// Trim returns a subslice of s by slicing off all leading and
-// trailing UTF-8-encoded Unicode code points contained in cutset.
+// trailing UTF-8-encoded code points contained in cutset.
func Trim(s []byte, cutset string) []byte {
return TrimFunc(s, makeCutsetFunc(cutset))
}
// TrimLeft returns a subslice of s by slicing off all leading
-// UTF-8-encoded Unicode code points contained in cutset.
+// UTF-8-encoded code points contained in cutset.
func TrimLeft(s []byte, cutset string) []byte {
return TrimLeftFunc(s, makeCutsetFunc(cutset))
}
// TrimRight returns a subslice of s by slicing off all trailing
-// UTF-8-encoded Unicode code points that are contained in cutset.
+// UTF-8-encoded code points that are contained in cutset.
func TrimRight(s []byte, cutset string) []byte {
return TrimRightFunc(s, makeCutsetFunc(cutset))
}
@@ -649,7 +713,8 @@ func TrimSpace(s []byte) []byte {
return TrimFunc(s, unicode.IsSpace)
}
-// Runes returns a slice of runes (Unicode code points) equivalent to s.
+// Runes interprets s as a sequence of UTF-8-encoded code points.
+// It returns a slice of runes (Unicode code points) equivalent to s.
func Runes(s []byte) []rune {
t := make([]rune, utf8.RuneCount(s))
i := 0
@@ -758,3 +823,46 @@ func EqualFold(s, t []byte) bool {
// One string is empty. Are both?
return len(s) == len(t)
}
+
+func indexRabinKarp(s, sep []byte) int {
+ // Rabin-Karp search
+ hashsep, pow := hashStr(sep)
+ n := len(sep)
+ var h uint32
+ for i := 0; i < n; i++ {
+ h = h*primeRK + uint32(s[i])
+ }
+ if h == hashsep && Equal(s[:n], sep) {
+ return 0
+ }
+ for i := n; i < len(s); {
+ h *= primeRK
+ h += uint32(s[i])
+ h -= pow * uint32(s[i-n])
+ i++
+ if h == hashsep && Equal(s[i-n:i], sep) {
+ return i - n
+ }
+ }
+ return -1
+}
+
+// primeRK is the prime base used in Rabin-Karp algorithm.
+const primeRK = 16777619
+
+// hashStr returns the hash and the appropriate multiplicative
+// factor for use in Rabin-Karp algorithm.
+func hashStr(sep []byte) (uint32, uint32) {
+ hash := uint32(0)
+ for i := 0; i < len(sep); i++ {
+ hash = hash*primeRK + uint32(sep[i])
+ }
+ var pow, sq uint32 = 1, primeRK
+ for i := len(sep); i > 0; i >>= 1 {
+ if i&1 != 0 {
+ pow *= sq
+ }
+ sq *= sq
+ }
+ return hash, pow
+}
diff --git a/src/bytes/bytes_amd64.go b/src/bytes/bytes_amd64.go
index 77d5970152a..0c9d613ef9d 100644
--- a/src/bytes/bytes_amd64.go
+++ b/src/bytes/bytes_amd64.go
@@ -75,52 +75,14 @@ func Index(s, sep []byte) int {
}
return -1
}
- // Rabin-Karp search
- hashsep, pow := hashStr(sep)
- var h uint32
- for i := 0; i < n; i++ {
- h = h*primeRK + uint32(s[i])
- }
- if h == hashsep && Equal(s[:n], sep) {
- return 0
- }
- for i := n; i < len(s); {
- h *= primeRK
- h += uint32(s[i])
- h -= pow * uint32(s[i-n])
- i++
- if h == hashsep && Equal(s[i-n:i], sep) {
- return i - n
- }
- }
- return -1
+ return indexRabinKarp(s, sep)
}
// Count counts the number of non-overlapping instances of sep in s.
-// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s.
+// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
func Count(s, sep []byte) int {
if len(sep) == 1 && cpu.X86.HasPOPCNT {
return countByte(s, sep[0])
}
return countGeneric(s, sep)
}
-
-// primeRK is the prime base used in Rabin-Karp algorithm.
-const primeRK = 16777619
-
-// hashStr returns the hash and the appropriate multiplicative
-// factor for use in Rabin-Karp algorithm.
-func hashStr(sep []byte) (uint32, uint32) {
- hash := uint32(0)
- for i := 0; i < len(sep); i++ {
- hash = hash*primeRK + uint32(sep[i])
- }
- var pow, sq uint32 = 1, primeRK
- for i := len(sep); i > 0; i >>= 1 {
- if i&1 != 0 {
- pow *= sq
- }
- sq *= sq
- }
- return hash, pow
-}
diff --git a/src/bytes/bytes_arm64.go b/src/bytes/bytes_arm64.go
new file mode 100644
index 00000000000..846eeba486a
--- /dev/null
+++ b/src/bytes/bytes_arm64.go
@@ -0,0 +1,68 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes
+
+func countByte(s []byte, c byte) int // bytes_arm64.s
+
+// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
+func Index(s, sep []byte) int {
+ n := len(sep)
+ switch {
+ case n == 0:
+ return 0
+ case n == 1:
+ return IndexByte(s, sep[0])
+ case n == len(s):
+ if Equal(sep, s) {
+ return 0
+ }
+ return -1
+ case n > len(s):
+ return -1
+ }
+ c := sep[0]
+ i := 0
+ fails := 0
+ t := s[:len(s)-n+1]
+ for i < len(t) {
+ if t[i] != c {
+ o := IndexByte(t[i:], c)
+ if o < 0 {
+ break
+ }
+ i += o
+ }
+ if Equal(s[i:i+n], sep) {
+ return i
+ }
+ i++
+ fails++
+ if fails >= 4+i>>4 && i < len(t) {
+ // Give up on IndexByte, it isn't skipping ahead
+ // far enough to be better than Rabin-Karp.
+ // Experiments (using IndexPeriodic) suggest
+ // the cutover is about 16 byte skips.
+ // TODO: if large prefixes of sep are matching
+ // we should cutover at even larger average skips,
+ // because Equal becomes that much more expensive.
+ // This code does not take that effect into account.
+ j := indexRabinKarp(s[i:], sep)
+ if j < 0 {
+ return -1
+ }
+ return i + j
+ }
+ }
+ return -1
+}
+
+// Count counts the number of non-overlapping instances of sep in s.
+// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
+func Count(s, sep []byte) int {
+ if len(sep) == 1 {
+ return countByte(s, sep[0])
+ }
+ return countGeneric(s, sep)
+}
diff --git a/src/bytes/bytes_arm64.s b/src/bytes/bytes_arm64.s
new file mode 100644
index 00000000000..5e229d772be
--- /dev/null
+++ b/src/bytes/bytes_arm64.s
@@ -0,0 +1,74 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// countByte(s []byte, c byte) int
+TEXT bytes·countByte(SB),NOSPLIT,$0-40
+ MOVD s_base+0(FP), R0
+ MOVD s_len+8(FP), R2
+ MOVBU c+24(FP), R1
+ // R11 = count of byte to search
+ MOVD $0, R11
+ // short path to handle 0-byte case
+ CBZ R2, done
+ CMP $0x20, R2
+ // jump directly to tail if length < 32
+ BLO tail
+ ANDS $0x1f, R0, R9
+ BEQ chunk
+ // Work with not 32-byte aligned head
+ BIC $0x1f, R0, R3
+ ADD $0x20, R3
+head_loop:
+ MOVBU.P 1(R0), R5
+ CMP R5, R1
+ CINC EQ, R11, R11
+ SUB $1, R2, R2
+ CMP R0, R3
+ BNE head_loop
+ // Work with 32-byte aligned chunks
+chunk:
+ BIC $0x1f, R2, R9
+ // The first chunk can also be the last
+ CBZ R9, tail
+ // R3 = end of 32-byte chunks
+ ADD R0, R9, R3
+ MOVD $1, R5
+ VMOV R5, V5.B16
+ // R2 = length of tail
+ SUB R9, R2, R2
+ // Duplicate R1 (byte to search) to 16 1-byte elements of V0
+ VMOV R1, V0.B16
+ // Clear the low 64-bit element of V7 and V8
+ VEOR V7.B8, V7.B8, V7.B8
+ VEOR V8.B8, V8.B8, V8.B8
+ // Count the target byte in 32-byte chunk
+chunk_loop:
+ VLD1.P (R0), [V1.B16, V2.B16]
+ CMP R0, R3
+ VCMEQ V0.B16, V1.B16, V3.B16
+ VCMEQ V0.B16, V2.B16, V4.B16
+ // Clear the higher 7 bits
+ VAND V5.B16, V3.B16, V3.B16
+ VAND V5.B16, V4.B16, V4.B16
+ // Count lanes match the requested byte
+ VADDP V4.B16, V3.B16, V6.B16 // 32B->16B
+ VUADDLV V6.B16, V7
+ // Accumulate the count in low 64-bit element of V8 when inside the loop
+ VADD V7, V8
+ BNE chunk_loop
+ VMOV V8.D[0], R6
+ ADD R6, R11, R11
+ CBZ R2, done
+tail:
+ // Work with tail shorter than 32 bytes
+ MOVBU.P 1(R0), R5
+ SUB $1, R2, R2
+ CMP R5, R1
+ CINC EQ, R11, R11
+ CBNZ R2, tail
+done:
+ MOVD R11, ret+32(FP)
+ RET
diff --git a/src/bytes/bytes_generic.go b/src/bytes/bytes_generic.go
index 98454bc121b..0e7d33f09ad 100644
--- a/src/bytes/bytes_generic.go
+++ b/src/bytes/bytes_generic.go
@@ -2,27 +2,29 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !amd64,!s390x
+// +build !amd64,!s390x,!arm64
package bytes
-// TODO: implements short string optimization on non amd64 platforms
-// and get rid of bytes_amd64.go
-
// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
n := len(sep)
- if n == 0 {
+ switch {
+ case n == 0:
return 0
- }
- if n > len(s) {
+ case n == 1:
+ return IndexByte(s, sep[0])
+ case n == len(s):
+ if Equal(sep, s) {
+ return 0
+ }
+ return -1
+ case n > len(s):
return -1
}
c := sep[0]
- if n == 1 {
- return IndexByte(s, c)
- }
i := 0
+ fails := 0
t := s[:len(s)-n+1]
for i < len(t) {
if t[i] != c {
@@ -36,12 +38,28 @@ func Index(s, sep []byte) int {
return i
}
i++
+ fails++
+ if fails >= 4+i>>4 && i < len(t) {
+ // Give up on IndexByte, it isn't skipping ahead
+ // far enough to be better than Rabin-Karp.
+ // Experiments (using IndexPeriodic) suggest
+ // the cutover is about 16 byte skips.
+ // TODO: if large prefixes of sep are matching
+ // we should cutover at even larger average skips,
+ // because Equal becomes that much more expensive.
+ // This code does not take that effect into account.
+ j := indexRabinKarp(s[i:], sep)
+ if j < 0 {
+ return -1
+ }
+ return i + j
+ }
}
return -1
}
// Count counts the number of non-overlapping instances of sep in s.
-// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s.
+// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
func Count(s, sep []byte) int {
return countGeneric(s, sep)
}
diff --git a/src/bytes/bytes_s390x.go b/src/bytes/bytes_s390x.go
index 68b57301fe8..c59b891292f 100644
--- a/src/bytes/bytes_s390x.go
+++ b/src/bytes/bytes_s390x.go
@@ -76,49 +76,11 @@ func Index(s, sep []byte) int {
}
return -1
}
- // Rabin-Karp search
- hashsep, pow := hashStr(sep)
- var h uint32
- for i := 0; i < n; i++ {
- h = h*primeRK + uint32(s[i])
- }
- if h == hashsep && Equal(s[:n], sep) {
- return 0
- }
- for i := n; i < len(s); {
- h *= primeRK
- h += uint32(s[i])
- h -= pow * uint32(s[i-n])
- i++
- if h == hashsep && Equal(s[i-n:i], sep) {
- return i - n
- }
- }
- return -1
+ return indexRabinKarp(s, sep)
}
// Count counts the number of non-overlapping instances of sep in s.
-// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s.
+// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
func Count(s, sep []byte) int {
return countGeneric(s, sep)
}
-
-// primeRK is the prime base used in Rabin-Karp algorithm.
-const primeRK = 16777619
-
-// hashStr returns the hash and the appropriate multiplicative
-// factor for use in Rabin-Karp algorithm.
-func hashStr(sep []byte) (uint32, uint32) {
- hash := uint32(0)
- for i := 0; i < len(sep); i++ {
- hash = hash*primeRK + uint32(sep[i])
- }
- var pow, sq uint32 = 1, primeRK
- for i := len(sep); i > 0; i >>= 1 {
- if i&1 != 0 {
- pow *= sq
- }
- sq *= sq
- }
- return hash, pow
-}
diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index ca0cdbb7c9f..1e56571c738 100644
--- a/src/bytes/bytes_test.go
+++ b/src/bytes/bytes_test.go
@@ -139,6 +139,9 @@ var indexTests = []BinOpTest{
{"barfoobarfooyyyzzzyyyzzzyyyzzzyyyxxxzzzyyy", "x", 33},
{"foofyfoobarfoobar", "y", 4},
{"oooooooooooooooooooooo", "r", -1},
+ // test fallback to Rabin-Karp.
+ {"oxoxoxoxoxoxoxoxoxoxoxoy", "oy", 22},
+ {"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
}
var lastIndexTests = []BinOpTest{
@@ -736,6 +739,13 @@ var splittests = []SplitTest{
func TestSplit(t *testing.T) {
for _, tt := range splittests {
a := SplitN([]byte(tt.s), []byte(tt.sep), tt.n)
+
+ // Appending to the results should not change future results.
+ var x []byte
+ for _, v := range a {
+ x = append(v, 'z')
+ }
+
result := sliceOfString(a)
if !eq(result, tt.a) {
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
@@ -744,6 +754,11 @@ func TestSplit(t *testing.T) {
if tt.n == 0 {
continue
}
+
+ if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
+ t.Errorf("last appended result was %s; want %s", x, want)
+ }
+
s := Join(a, []byte(tt.sep))
if string(s) != tt.s {
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
@@ -782,11 +797,23 @@ var splitaftertests = []SplitTest{
func TestSplitAfter(t *testing.T) {
for _, tt := range splitaftertests {
a := SplitAfterN([]byte(tt.s), []byte(tt.sep), tt.n)
+
+ // Appending to the results should not change future results.
+ var x []byte
+ for _, v := range a {
+ x = append(v, 'z')
+ }
+
result := sliceOfString(a)
if !eq(result, tt.a) {
t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
continue
}
+
+ if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
+ t.Errorf("last appended result was %s; want %s", x, want)
+ }
+
s := Join(a, nil)
if string(s) != tt.s {
t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
@@ -821,12 +848,29 @@ var fieldstests = []FieldsTest{
func TestFields(t *testing.T) {
for _, tt := range fieldstests {
- a := Fields([]byte(tt.s))
+ b := []byte(tt.s)
+ a := Fields(b)
+
+ // Appending to the results should not change future results.
+ var x []byte
+ for _, v := range a {
+ x = append(v, 'z')
+ }
+
result := sliceOfString(a)
if !eq(result, tt.a) {
t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a)
continue
}
+
+ if string(b) != tt.s {
+ t.Errorf("slice changed to %s; want %s", string(b), tt.s)
+ }
+ if len(tt.a) > 0 {
+ if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
+ t.Errorf("last appended result was %s; want %s", x, want)
+ }
+ }
}
}
@@ -847,11 +891,28 @@ func TestFieldsFunc(t *testing.T) {
{"aXXbXXXcX", []string{"a", "b", "c"}},
}
for _, tt := range fieldsFuncTests {
- a := FieldsFunc([]byte(tt.s), pred)
+ b := []byte(tt.s)
+ a := FieldsFunc(b, pred)
+
+ // Appending to the results should not change future results.
+ var x []byte
+ for _, v := range a {
+ x = append(v, 'z')
+ }
+
result := sliceOfString(a)
if !eq(result, tt.a) {
t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a)
}
+
+ if string(b) != tt.s {
+ t.Errorf("slice changed to %s; want %s", b, tt.s)
+ }
+ if len(tt.a) > 0 {
+ if want := tt.a[len(tt.a)-1] + "z"; string(x) != want {
+ t.Errorf("last appended result was %s; want %s", x, want)
+ }
+ }
}
}
@@ -1502,19 +1563,58 @@ var makeFieldsInput = func() []byte {
return x
}
-var fieldsInput = makeFieldsInput()
+var makeFieldsInputASCII = func() []byte {
+ x := make([]byte, 1<<20)
+ // Input is ~10% space, rest ASCII non-space.
+ for i := range x {
+ if rand.Intn(10) == 0 {
+ x[i] = ' '
+ } else {
+ x[i] = 'x'
+ }
+ }
+ return x
+}
+
+var bytesdata = []struct {
+ name string
+ data []byte
+}{
+ {"ASCII", makeFieldsInputASCII()},
+ {"Mixed", makeFieldsInput()},
+}
func BenchmarkFields(b *testing.B) {
- b.SetBytes(int64(len(fieldsInput)))
- for i := 0; i < b.N; i++ {
- Fields(fieldsInput)
+ for _, sd := range bytesdata {
+ b.Run(sd.name, func(b *testing.B) {
+ for j := 1 << 4; j <= 1<<20; j <<= 4 {
+ b.Run(fmt.Sprintf("%d", j), func(b *testing.B) {
+ b.ReportAllocs()
+ b.SetBytes(int64(j))
+ data := sd.data[:j]
+ for i := 0; i < b.N; i++ {
+ Fields(data)
+ }
+ })
+ }
+ })
}
}
func BenchmarkFieldsFunc(b *testing.B) {
- b.SetBytes(int64(len(fieldsInput)))
- for i := 0; i < b.N; i++ {
- FieldsFunc(fieldsInput, unicode.IsSpace)
+ for _, sd := range bytesdata {
+ b.Run(sd.name, func(b *testing.B) {
+ for j := 1 << 4; j <= 1<<20; j <<= 4 {
+ b.Run(fmt.Sprintf("%d", j), func(b *testing.B) {
+ b.ReportAllocs()
+ b.SetBytes(int64(j))
+ data := sd.data[:j]
+ for i := 0; i < b.N; i++ {
+ FieldsFunc(data, unicode.IsSpace)
+ }
+ })
+ }
+ })
}
}
@@ -1633,3 +1733,18 @@ func BenchmarkTrimASCII(b *testing.B) {
}
}
}
+
+func BenchmarkIndexPeriodic(b *testing.B) {
+ key := []byte{1, 1}
+ for _, skip := range [...]int{2, 4, 8, 16, 32, 64} {
+ b.Run(fmt.Sprintf("IndexPeriodic%d", skip), func(b *testing.B) {
+ buf := make([]byte, 1<<16)
+ for i := 0; i < len(buf); i += skip {
+ buf[i] = 1
+ }
+ for i := 0; i < b.N; i++ {
+ Index(buf, key)
+ }
+ })
+ }
+}
diff --git a/src/bytes/equal_test.go b/src/bytes/equal_test.go
deleted file mode 100644
index 9fdead8a604..00000000000
--- a/src/bytes/equal_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-// +build linux
-
-package bytes_test
-
-import (
- . "bytes"
- "syscall"
- "testing"
- "unsafe"
-)
-
-// This file tests the situation where memeq is checking
-// data very near to a page boundary. We want to make sure
-// equal does not read across the boundary and cause a page
-// fault where it shouldn't.
-
-// This test runs only on linux. The code being tested is
-// not OS-specific, so it does not need to be tested on all
-// operating systems.
-
-func TestEqualNearPageBoundary(t *testing.T) {
- pagesize := syscall.Getpagesize()
- b := make([]byte, 4*pagesize)
- i := pagesize
- for ; uintptr(unsafe.Pointer(&b[i]))%uintptr(pagesize) != 0; i++ {
- }
- syscall.Mprotect(b[i-pagesize:i], 0)
- syscall.Mprotect(b[i+pagesize:i+2*pagesize], 0)
- defer syscall.Mprotect(b[i-pagesize:i], syscall.PROT_READ|syscall.PROT_WRITE)
- defer syscall.Mprotect(b[i+pagesize:i+2*pagesize], syscall.PROT_READ|syscall.PROT_WRITE)
-
- // both of these should fault
- //pagesize += int(b[i-1])
- //pagesize += int(b[i+pagesize])
-
- for j := 0; j < pagesize; j++ {
- b[i+j] = 'A'
- }
- for j := 0; j <= pagesize; j++ {
- Equal(b[i:i+j], b[i+pagesize-j:i+pagesize])
- Equal(b[i+pagesize-j:i+pagesize], b[i:i+j])
- }
-}
diff --git a/src/bytes/example_test.go b/src/bytes/example_test.go
index 93972770ab2..5b7a46058f5 100644
--- a/src/bytes/example_test.go
+++ b/src/bytes/example_test.go
@@ -119,6 +119,32 @@ func ExampleContains() {
// true
}
+func ExampleContainsAny() {
+ fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "fÄo!"))
+ fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "去是伟大的."))
+ fmt.Println(bytes.ContainsAny([]byte("I like seafood."), ""))
+ fmt.Println(bytes.ContainsAny([]byte(""), ""))
+ // Output:
+ // true
+ // true
+ // false
+ // false
+}
+
+func ExampleContainsRune() {
+ fmt.Println(bytes.ContainsRune([]byte("I like seafood."), 'f'))
+ fmt.Println(bytes.ContainsRune([]byte("I like seafood."), 'ö'))
+ fmt.Println(bytes.ContainsRune([]byte("去是伟大的!"), '大'))
+ fmt.Println(bytes.ContainsRune([]byte("去是伟大的!"), '!'))
+ fmt.Println(bytes.ContainsRune([]byte(""), '@'))
+ // Output:
+ // true
+ // false
+ // true
+ // true
+ // false
+}
+
func ExampleCount() {
fmt.Println(bytes.Count([]byte("cheese"), []byte("e")))
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
@@ -127,6 +153,14 @@ func ExampleCount() {
// 5
}
+func ExampleEqual() {
+ fmt.Println(bytes.Equal([]byte("Go"), []byte("Go")))
+ fmt.Println(bytes.Equal([]byte("Go"), []byte("C++")))
+ // Output:
+ // true
+ // false
+}
+
func ExampleEqualFold() {
fmt.Println(bytes.EqualFold([]byte("Go"), []byte("go")))
// Output: true
@@ -162,6 +196,14 @@ func ExampleIndex() {
// -1
}
+func ExampleIndexByte() {
+ fmt.Println(bytes.IndexByte([]byte("chicken"), byte('k')))
+ fmt.Println(bytes.IndexByte([]byte("chicken"), byte('g')))
+ // Output:
+ // 4
+ // -1
+}
+
func ExampleIndexFunc() {
f := func(c rune) bool {
return unicode.Is(unicode.Han, c)
@@ -199,6 +241,36 @@ func ExampleLastIndex() {
// -1
}
+func ExampleLastIndexAny() {
+ fmt.Println(bytes.LastIndexAny([]byte("go gopher"), "MüQp"))
+ fmt.Println(bytes.LastIndexAny([]byte("go 地鼠"), "地大"))
+ fmt.Println(bytes.LastIndexAny([]byte("go gopher"), "z,!."))
+ // Output:
+ // 5
+ // 3
+ // -1
+}
+
+func ExampleLastIndexByte() {
+ fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('g')))
+ fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('r')))
+ fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('z')))
+ // Output:
+ // 3
+ // 8
+ // -1
+}
+
+func ExampleLastIndexFunc() {
+ fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsLetter))
+ fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsPunct))
+ fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsNumber))
+ // Output:
+ // 8
+ // 9
+ // -1
+}
+
func ExampleJoin() {
s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
fmt.Printf("%s", bytes.Join(s, []byte(", ")))
@@ -218,6 +290,23 @@ func ExampleReplace() {
// moo moo moo
}
+func ExampleRunes() {
+ rs := bytes.Runes([]byte("go gopher"))
+ for _, r := range rs {
+ fmt.Printf("%#U\n", r)
+ }
+ // Output:
+ // U+0067 'g'
+ // U+006F 'o'
+ // U+0020 ' '
+ // U+0067 'g'
+ // U+006F 'o'
+ // U+0070 'p'
+ // U+0068 'h'
+ // U+0065 'e'
+ // U+0072 'r'
+}
+
func ExampleSplit() {
fmt.Printf("%q\n", bytes.Split([]byte("a,b,c"), []byte(",")))
fmt.Printf("%q\n", bytes.Split([]byte("a man a plan a canal panama"), []byte("a ")))
@@ -267,6 +356,18 @@ func ExampleTrim() {
// Output: ["Achtung! Achtung"]
}
+func ExampleTrimFunc() {
+ fmt.Println(string(bytes.TrimFunc([]byte("go-gopher!"), unicode.IsLetter)))
+ fmt.Println(string(bytes.TrimFunc([]byte("\"go-gopher!\""), unicode.IsLetter)))
+ fmt.Println(string(bytes.TrimFunc([]byte("go-gopher!"), unicode.IsPunct)))
+ fmt.Println(string(bytes.TrimFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
+ // Output:
+ // -gopher!
+ // "go-gopher!"
+ // go-gopher
+ // go-gopher!
+}
+
func ExampleMap() {
rot13 := func(r rune) rune {
switch {
@@ -281,11 +382,43 @@ func ExampleMap() {
// Output: 'Gjnf oevyyvt naq gur fyvgul tbcure...
}
+func ExampleTrimLeft() {
+ fmt.Print(string(bytes.TrimLeft([]byte("453gopher8257"), "0123456789")))
+ // Output:
+ // gopher8257
+}
+
+func ExampleTrimLeftFunc() {
+ fmt.Println(string(bytes.TrimLeftFunc([]byte("go-gopher"), unicode.IsLetter)))
+ fmt.Println(string(bytes.TrimLeftFunc([]byte("go-gopher!"), unicode.IsPunct)))
+ fmt.Println(string(bytes.TrimLeftFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
+ // Output:
+ // -gopher
+ // go-gopher!
+ // go-gopher!567
+}
+
func ExampleTrimSpace() {
fmt.Printf("%s", bytes.TrimSpace([]byte(" \t\n a lone gopher \n\t\r\n")))
// Output: a lone gopher
}
+func ExampleTrimRight() {
+ fmt.Print(string(bytes.TrimRight([]byte("453gopher8257"), "0123456789")))
+ // Output:
+ // 453gopher
+}
+
+func ExampleTrimRightFunc() {
+ fmt.Println(string(bytes.TrimRightFunc([]byte("go-gopher"), unicode.IsLetter)))
+ fmt.Println(string(bytes.TrimRightFunc([]byte("go-gopher!"), unicode.IsPunct)))
+ fmt.Println(string(bytes.TrimRightFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
+ // Output:
+ // go-
+ // go-gopher
+ // 1234go-gopher!
+}
+
func ExampleToUpper() {
fmt.Printf("%s", bytes.ToUpper([]byte("Gopher")))
// Output: GOPHER
@@ -295,3 +428,11 @@ func ExampleToLower() {
fmt.Printf("%s", bytes.ToLower([]byte("Gopher")))
// Output: gopher
}
+
+func ExampleReader_Len() {
+ fmt.Println(bytes.NewReader([]byte("Hi!")).Len())
+ fmt.Println(bytes.NewReader([]byte("こんにちは!")).Len())
+ // Output:
+ // 3
+ // 16
+}
diff --git a/src/bytes/reader.go b/src/bytes/reader.go
index 28cfc7a9788..08464c2402d 100644
--- a/src/bytes/reader.go
+++ b/src/bytes/reader.go
@@ -35,6 +35,7 @@ func (r *Reader) Len() int {
// to any other method.
func (r *Reader) Size() int64 { return int64(len(r.s)) }
+// Read implements the io.Reader interface.
func (r *Reader) Read(b []byte) (n int, err error) {
if r.i >= int64(len(r.s)) {
return 0, io.EOF
@@ -45,6 +46,7 @@ func (r *Reader) Read(b []byte) (n int, err error) {
return
}
+// ReadAt implements the io.ReaderAt interface.
func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
// cannot modify state - see io.ReaderAt
if off < 0 {
@@ -60,6 +62,7 @@ func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
return
}
+// ReadByte implements the io.ByteReader interface.
func (r *Reader) ReadByte() (byte, error) {
r.prevRune = -1
if r.i >= int64(len(r.s)) {
@@ -70,6 +73,7 @@ func (r *Reader) ReadByte() (byte, error) {
return b, nil
}
+// UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
func (r *Reader) UnreadByte() error {
r.prevRune = -1
if r.i <= 0 {
@@ -79,6 +83,7 @@ func (r *Reader) UnreadByte() error {
return nil
}
+// ReadRune implements the io.RuneReader interface.
func (r *Reader) ReadRune() (ch rune, size int, err error) {
if r.i >= int64(len(r.s)) {
r.prevRune = -1
@@ -94,6 +99,7 @@ func (r *Reader) ReadRune() (ch rune, size int, err error) {
return
}
+// UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
func (r *Reader) UnreadRune() error {
if r.prevRune < 0 {
return errors.New("bytes.Reader.UnreadRune: previous operation was not ReadRune")
diff --git a/src/bytes/reader_test.go b/src/bytes/reader_test.go
index 7b3034d4e0d..8806876ff13 100644
--- a/src/bytes/reader_test.go
+++ b/src/bytes/reader_test.go
@@ -140,9 +140,9 @@ func TestReaderWriteTo(t *testing.T) {
for i := 0; i < 30; i += 3 {
var l int
if i > 0 {
- l = len(data) / i
+ l = len(testString) / i
}
- s := data[:l]
+ s := testString[:l]
r := NewReader(testBytes[:l])
var b Buffer
n, err := r.WriteTo(&b)
diff --git a/src/cmd/api/goapi.go b/src/cmd/api/goapi.go
index 936f9e55115..8cc78c01ed4 100644
--- a/src/cmd/api/goapi.go
+++ b/src/cmd/api/goapi.go
@@ -27,6 +27,18 @@ import (
"strings"
)
+func goCmd() string {
+ var exeSuffix string
+ if runtime.GOOS == "windows" {
+ exeSuffix = ".exe"
+ }
+ path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix)
+ if _, err := os.Stat(path); err == nil {
+ return path
+ }
+ return "go"
+}
+
// Flags
var (
checkFile = flag.String("c", "", "optional comma-separated filename(s) to check API against")
@@ -127,7 +139,7 @@ func main() {
if flag.NArg() > 0 {
pkgNames = flag.Args()
} else {
- stds, err := exec.Command("go", "list", "std").Output()
+ stds, err := exec.Command(goCmd(), "list", "std").Output()
if err != nil {
log.Fatal(err)
}
diff --git a/src/cmd/api/goapi_test.go b/src/cmd/api/goapi_test.go
index 0d00f6a2977..3c4e50a21a3 100644
--- a/src/cmd/api/goapi_test.go
+++ b/src/cmd/api/goapi_test.go
@@ -9,6 +9,7 @@ import (
"flag"
"fmt"
"go/build"
+ "internal/testenv"
"io/ioutil"
"os"
"os/exec"
@@ -163,7 +164,7 @@ func TestSkipInternal(t *testing.T) {
}
func BenchmarkAll(b *testing.B) {
- stds, err := exec.Command("go", "list", "std").Output()
+ stds, err := exec.Command(testenv.GoToolPath(b), "list", "std").Output()
if err != nil {
b.Fatal(err)
}
diff --git a/src/cmd/api/run.go b/src/cmd/api/run.go
index 20cddb704bf..219776cae40 100644
--- a/src/cmd/api/run.go
+++ b/src/cmd/api/run.go
@@ -14,8 +14,21 @@ import (
"os"
"os/exec"
"path/filepath"
+ "runtime"
)
+func goCmd() string {
+ var exeSuffix string
+ if runtime.GOOS == "windows" {
+ exeSuffix = ".exe"
+ }
+ path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix)
+ if _, err := os.Stat(path); err == nil {
+ return path
+ }
+ return "go"
+}
+
var goroot string
func main() {
@@ -25,7 +38,7 @@ func main() {
log.Fatal("No $GOROOT set.")
}
- out, err := exec.Command("go", "tool", "api",
+ out, err := exec.Command(goCmd(), "tool", "api",
"-c", file("go1", "go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6", "go1.7", "go1.8", "go1.9"),
"-next", file("next"),
"-except", file("except")).CombinedOutput()
diff --git a/src/cmd/asm/doc.go b/src/cmd/asm/doc.go
index aa621479579..c39cab3c195 100644
--- a/src/cmd/asm/doc.go
+++ b/src/cmd/asm/doc.go
@@ -19,23 +19,26 @@ The GOOS and GOARCH environment variables set the desired target.
Flags:
- -D value
- predefined symbol with optional simple value -D=identifier=value;
- can be set multiple times
- -I value
- include directory; can be set multiple times
- -S print assembly and machine code
+ -D name[=value]
+ Predefine symbol name with an optional simple value.
+ Can be repeated to define multiple symbols.
+ -I dir1 -I dir2
+ Search for #include files in dir1, dir2, etc,
+ after consulting $GOROOT/pkg/$GOOS_$GOARCH.
+ -S
+ Print assembly and machine code.
+ -V
+ Print assembler version and exit.
-debug
- dump instructions as they are parsed
+ Dump instructions as they are parsed.
-dynlink
- support references to Go symbols defined in other shared libraries
- -o string
- output file; default foo.o for /a/b/c/foo.s
+ Support references to Go symbols defined in other shared libraries.
+ -o file
+ Write output to file. The default is foo.o for /a/b/c/foo.s.
-shared
- generate code that can be linked into a shared library
- -trimpath string
- remove prefix from recorded source file paths
-
+ Generate code that can be linked into a shared library.
+ -trimpath prefix
+ Remove prefix from recorded source file paths.
Input language:
The assembler uses mostly the same syntax for all architectures,
diff --git a/src/cmd/asm/internal/arch/amd64.go b/src/cmd/asm/internal/arch/amd64.go
deleted file mode 100644
index ff20d32daaa..00000000000
--- a/src/cmd/asm/internal/arch/amd64.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file encapsulates some of the odd characteristics of the
-// AMD64 instruction set, to minimize its interaction
-// with the core of the assembler.
-
-package arch
-
-import (
- "cmd/internal/obj"
- "cmd/internal/obj/x86"
-)
-
-// IsAMD4OP reports whether the op (as defined by an amd64.A* constant) is
-// a 4-operand instruction.
-func IsAMD4OP(op obj.As) bool {
- switch op {
- case x86.AVPERM2F128,
- x86.AVPALIGNR,
- x86.AVPERM2I128,
- x86.AVINSERTI128,
- x86.AVPBLENDD:
- return true
- }
- return false
-}
diff --git a/src/cmd/asm/internal/arch/arm.go b/src/cmd/asm/internal/arch/arm.go
index 40443d5ecaf..6e86ac0fbe2 100644
--- a/src/cmd/asm/internal/arch/arm.go
+++ b/src/cmd/asm/internal/arch/arm.go
@@ -122,6 +122,16 @@ func IsARMMRC(op obj.As) bool {
return false
}
+// IsARMBFX reports whether the op (as defined by an arm.A* constant) is one of the
+// BFX-like instructions which are in the form of "op $width, $LSB, (Reg,) Reg".
+func IsARMBFX(op obj.As) bool {
+ switch op {
+ case arm.ABFX, arm.ABFXU, arm.ABFC, arm.ABFI:
+ return true
+ }
+ return false
+}
+
// IsARMFloatCmp reports whether the op is a floating comparison instruction.
func IsARMFloatCmp(op obj.As) bool {
switch op {
diff --git a/src/cmd/asm/internal/arch/arm64.go b/src/cmd/asm/internal/arch/arm64.go
index dd04719451e..2fd21b58b8f 100644
--- a/src/cmd/asm/internal/arch/arm64.go
+++ b/src/cmd/asm/internal/arch/arm64.go
@@ -11,6 +11,7 @@ package arch
import (
"cmd/internal/obj"
"cmd/internal/obj/arm64"
+ "errors"
)
var arm64LS = map[string]uint8{
@@ -56,7 +57,9 @@ func jumpArm64(word string) bool {
func IsARM64CMP(op obj.As) bool {
switch op {
case arm64.ACMN, arm64.ACMP, arm64.ATST,
- arm64.ACMNW, arm64.ACMPW, arm64.ATSTW:
+ arm64.ACMNW, arm64.ACMPW, arm64.ATSTW,
+ arm64.AFCMPS, arm64.AFCMPD,
+ arm64.AFCMPES, arm64.AFCMPED:
return true
}
return false
@@ -67,7 +70,8 @@ func IsARM64CMP(op obj.As) bool {
// handling.
func IsARM64STLXR(op obj.As) bool {
switch op {
- case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR:
+ case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR,
+ arm64.ASTXRB, arm64.ASTXRH, arm64.ASTXRW, arm64.ASTXR:
return true
}
return false
@@ -115,3 +119,162 @@ func arm64RegisterNumber(name string, n int16) (int16, bool) {
}
return 0, false
}
+
+// ARM64RegisterExtension parses an ARM64 register with extension or arrangement.
+func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error {
+ rm := uint32(reg)
+ switch ext {
+ case "UXTB":
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = arm64.REG_UXTB + (reg & 31) + int16(num<<5)
+ a.Offset = int64(((rm & 31) << 16) | (uint32(num) << 10))
+ case "UXTH":
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = arm64.REG_UXTH + (reg & 31) + int16(num<<5)
+ a.Offset = int64(((rm & 31) << 16) | (1 << 13) | (uint32(num) << 10))
+ case "UXTW":
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = arm64.REG_UXTW + (reg & 31) + int16(num<<5)
+ a.Offset = int64(((rm & 31) << 16) | (2 << 13) | (uint32(num) << 10))
+ case "UXTX":
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = arm64.REG_UXTX + (reg & 31) + int16(num<<5)
+ a.Offset = int64(((rm & 31) << 16) | (3 << 13) | (uint32(num) << 10))
+ case "SXTB":
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = arm64.REG_SXTB + (reg & 31) + int16(num<<5)
+ a.Offset = int64(((rm & 31) << 16) | (4 << 13) | (uint32(num) << 10))
+ case "SXTH":
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = arm64.REG_SXTH + (reg & 31) + int16(num<<5)
+ a.Offset = int64(((rm & 31) << 16) | (5 << 13) | (uint32(num) << 10))
+ case "SXTW":
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = arm64.REG_SXTW + (reg & 31) + int16(num<<5)
+ a.Offset = int64(((rm & 31) << 16) | (6 << 13) | (uint32(num) << 10))
+ case "SXTX":
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = arm64.REG_SXTX + (reg & 31) + int16(num<<5)
+ a.Offset = int64(((rm & 31) << 16) | (7 << 13) | (uint32(num) << 10))
+ case "B8":
+ a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5)
+ case "B16":
+ a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5)
+ case "H4":
+ a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5)
+ case "H8":
+ a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5)
+ case "S2":
+ a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5)
+ case "S4":
+ a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5)
+ case "D2":
+ a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5)
+ case "B":
+ if !isIndex {
+ return nil
+ }
+ a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5)
+ a.Index = num
+ case "H":
+ if !isIndex {
+ return nil
+ }
+ a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5)
+ a.Index = num
+ case "S":
+ if !isIndex {
+ return nil
+ }
+ a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5)
+ a.Index = num
+ case "D":
+ if !isIndex {
+ return nil
+ }
+ a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5)
+ a.Index = num
+ default:
+ return errors.New("unsupported register extension type: " + ext)
+ }
+ a.Type = obj.TYPE_REG
+ return nil
+}
+
+// ARM64RegisterArrangement parses an ARM64 vector register arrangement.
+func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) {
+ var curQ, curSize uint16
+ if name[0] != 'V' {
+ return 0, errors.New("expect V0 through V31; found: " + name)
+ }
+ if reg < 0 {
+ return 0, errors.New("invalid register number: " + name)
+ }
+ switch arng {
+ case "B8":
+ curSize = 0
+ curQ = 0
+ case "B16":
+ curSize = 0
+ curQ = 1
+ case "H4":
+ curSize = 1
+ curQ = 0
+ case "H8":
+ curSize = 1
+ curQ = 1
+ case "S2":
+ curSize = 2
+ curQ = 0
+ case "S4":
+ curSize = 2
+ curQ = 1
+ case "D1":
+ curSize = 3
+ curQ = 0
+ case "D2":
+ curSize = 3
+ curQ = 1
+ default:
+ return 0, errors.New("invalid arrangement in ARM64 register list")
+ }
+ return (int64(curQ&1) << 30) | (int64(curSize&3) << 10), nil
+}
+
+// ARM64RegisterListOffset generates offset encoding according to AArch64 specification.
+func ARM64RegisterListOffset(firstReg, regCnt int, arrangement int64) (int64, error) {
+ offset := int64(firstReg)
+ switch regCnt {
+ case 1:
+ offset |= 0x7 << 12
+ case 2:
+ offset |= 0xa << 12
+ case 3:
+ offset |= 0x6 << 12
+ case 4:
+ offset |= 0x2 << 12
+ default:
+ return 0, errors.New("invalid register numbers in ARM64 register list")
+ }
+ offset |= arrangement
+ // arm64 uses the 60th bit to differentiate from other archs
+ // For more details, refer to: obj/arm64/list7.go
+ offset |= 1 << 60
+ return offset, nil
+}
diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go
index df23856c474..bf3545b32fa 100644
--- a/src/cmd/asm/internal/asm/asm.go
+++ b/src/cmd/asm/internal/asm/asm.go
@@ -384,7 +384,7 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) {
prog.Reg = p.getRegister(prog, op, &a[1])
} else {
// Compare register with immediate and jump.
- prog.From3 = newAddr(a[1])
+ prog.SetFrom3(a[1])
}
break
}
@@ -507,27 +507,6 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
break
}
// Strange special cases.
- if arch.IsARMSTREX(op) {
- /*
- STREX x, (y)
- from=(y) reg=x to=x
- STREX (x), y
- from=(x) reg=y to=y
- */
- if a[0].Type == obj.TYPE_REG && a[1].Type != obj.TYPE_REG {
- prog.From = a[1]
- prog.Reg = a[0].Reg
- prog.To = a[0]
- break
- } else if a[0].Type != obj.TYPE_REG && a[1].Type == obj.TYPE_REG {
- prog.From = a[0]
- prog.Reg = a[1].Reg
- prog.To = a[1]
- break
- }
- p.errorf("unrecognized addressing for %s", op)
- return
- }
if arch.IsARMFloatCmp(op) {
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
@@ -564,18 +543,20 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
prog.To = a[2]
break
}
+ if arch.IsARMBFX(op) {
+ // a[0] and a[1] must be constants, a[2] must be a register
+ prog.From = a[0]
+ prog.SetFrom3(a[1])
+ prog.To = a[2]
+ break
+ }
// Otherwise the 2nd operand (a[1]) must be a register.
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
prog.To = a[2]
case sys.AMD64:
- // Catch missing operand here, because we store immediate as part of From3, and can't distinguish
- // missing operand from legal value 0 in obj/x86/asm6.
- if arch.IsAMD4OP(op) {
- p.errorf("4 operands required, but only 3 are provided for %s instruction", op)
- }
prog.From = a[0]
- prog.From3 = newAddr(a[1])
+ prog.SetFrom3(a[1])
prog.To = a[2]
case sys.ARM64:
// ARM64 instructions with one input and two outputs.
@@ -594,7 +575,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
prog.To = a[2]
case sys.I386:
prog.From = a[0]
- prog.From3 = newAddr(a[1])
+ prog.SetFrom3(a[1])
prog.To = a[2]
case sys.PPC64:
if arch.IsPPC64CMP(op) {
@@ -616,7 +597,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
prog.To = a[2]
case obj.TYPE_CONST:
prog.From = a[0]
- prog.From3 = newAddr(a[1])
+ prog.SetFrom3(a[1])
prog.To = a[2]
default:
p.errorf("invalid addressing modes for %s instruction", op)
@@ -627,7 +608,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
if a[1].Type == obj.TYPE_REG {
prog.Reg = p.getRegister(prog, op, &a[1])
} else {
- prog.From3 = newAddr(a[1])
+ prog.SetFrom3(a[1])
}
prog.To = a[2]
default:
@@ -635,40 +616,39 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
return
}
case 4:
- if p.arch.Family == sys.ARM && arch.IsARMMULA(op) {
- // All must be registers.
- p.getRegister(prog, op, &a[0])
- r1 := p.getRegister(prog, op, &a[1])
- r2 := p.getRegister(prog, op, &a[2])
- p.getRegister(prog, op, &a[3])
- prog.From = a[0]
- prog.To = a[3]
- prog.To.Type = obj.TYPE_REGREG2
- prog.To.Offset = int64(r2)
- prog.Reg = r1
- break
+ if p.arch.Family == sys.ARM {
+ if arch.IsARMBFX(op) {
+ // a[0] and a[1] must be constants, a[2] and a[3] must be registers
+ prog.From = a[0]
+ prog.SetFrom3(a[1])
+ prog.Reg = p.getRegister(prog, op, &a[2])
+ prog.To = a[3]
+ break
+ }
+ if arch.IsARMMULA(op) {
+ // All must be registers.
+ p.getRegister(prog, op, &a[0])
+ r1 := p.getRegister(prog, op, &a[1])
+ r2 := p.getRegister(prog, op, &a[2])
+ p.getRegister(prog, op, &a[3])
+ prog.From = a[0]
+ prog.To = a[3]
+ prog.To.Type = obj.TYPE_REGREG2
+ prog.To.Offset = int64(r2)
+ prog.Reg = r1
+ break
+ }
}
if p.arch.Family == sys.AMD64 {
- // 4 operand instruction have form ymm1, ymm2, ymm3/m256, imm8
- // So From3 is always just a register, so we store imm8 in Offset field,
- // to avoid increasing size of Prog.
- prog.From = a[1]
- prog.From3 = newAddr(a[2])
- if a[0].Type != obj.TYPE_CONST {
- p.errorf("first operand must be an immediate in %s instruction", op)
- }
- if prog.From3.Type != obj.TYPE_REG {
- p.errorf("third operand must be a register in %s instruction", op)
- }
- prog.From3.Offset = int64(p.getImmediate(prog, op, &a[0]))
+ prog.From = a[0]
+ prog.RestArgs = []obj.Addr{a[1], a[2]}
prog.To = a[3]
- prog.RegTo2 = -1
break
}
if p.arch.Family == sys.ARM64 {
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
- prog.From3 = newAddr(a[2])
+ prog.SetFrom3(a[2])
prog.To = a[3]
break
}
@@ -676,12 +656,12 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
if arch.IsPPC64RLD(op) {
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
- prog.From3 = newAddr(a[2])
+ prog.SetFrom3(a[2])
prog.To = a[3]
break
} else if arch.IsPPC64ISEL(op) {
// ISEL BC,RB,RA,RT becomes isel rt,ra,rb,bc
- prog.From3 = newAddr(a[2]) // ra
+ prog.SetFrom3(a[2]) // ra
prog.From = a[0] // bc
prog.Reg = p.getRegister(prog, op, &a[1]) // rb
prog.To = a[3] // rt
@@ -695,13 +675,13 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
if a[1].Type == obj.TYPE_REG {
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
- prog.From3 = newAddr(a[2])
+ prog.SetFrom3(a[2])
prog.To = a[3]
break
} else if a[1].Type == obj.TYPE_CONST {
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[2])
- prog.From3 = newAddr(a[1])
+ prog.SetFrom3(a[1])
prog.To = a[3]
break
} else {
@@ -716,7 +696,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
}
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
- prog.From3 = newAddr(a[2])
+ prog.SetFrom3(a[2])
prog.To = a[3]
break
}
@@ -735,10 +715,10 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
} else {
mask = (^uint32(0) >> uint(mask2+1)) & (^uint32(0) << uint(31-(mask1-1)))
}
- prog.From3 = &obj.Addr{
+ prog.SetFrom3(obj.Addr{
Type: obj.TYPE_CONST,
Offset: int64(mask),
- }
+ })
prog.To = a[4]
break
}
diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go
index e5bc34edec3..e877a531786 100644
--- a/src/cmd/asm/internal/asm/endtoend_test.go
+++ b/src/cmd/asm/internal/asm/endtoend_test.go
@@ -186,7 +186,7 @@ Diff:
t.Errorf(format, args...)
ok = false
}
- obj.Flushplist(ctxt, pList, nil)
+ obj.Flushplist(ctxt, pList, nil, "")
for p := top; p != nil; p = p.Link {
if p.As == obj.ATEXT {
@@ -290,7 +290,7 @@ func testErrors(t *testing.T, goarch, file string) {
errBuf.WriteString(s)
}
pList.Firstpc, ok = parser.Parse()
- obj.Flushplist(ctxt, pList, nil)
+ obj.Flushplist(ctxt, pList, nil, "")
if ok && !failed {
t.Errorf("asm: %s had no errors", goarch)
}
@@ -391,6 +391,7 @@ func TestAMD64EndToEnd(t *testing.T) {
func TestAMD64Encoder(t *testing.T) {
testEndToEnd(t, "amd64", "amd64enc")
+ testEndToEnd(t, "amd64", "amd64enc_extra")
}
func TestAMD64Errors(t *testing.T) {
diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go
index a6e13db7496..1d5d07344dd 100644
--- a/src/cmd/asm/internal/asm/parse.go
+++ b/src/cmd/asm/internal/asm/parse.go
@@ -258,11 +258,11 @@ func (p *Parser) parseScale(s string) int8 {
}
// operand parses a general operand and stores the result in *a.
-func (p *Parser) operand(a *obj.Addr) bool {
+func (p *Parser) operand(a *obj.Addr) {
//fmt.Printf("Operand: %v\n", p.input)
if len(p.input) == 0 {
p.errorf("empty operand: cannot happen")
- return false
+ return
}
// General address (with a few exceptions) looks like
// $sym±offset(SB)(reg)(index*scale)
@@ -290,7 +290,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
p.symbolReference(a, name, prefix)
// fmt.Printf("SYM %s\n", obj.Dconv(&emptyProg, 0, a))
if p.peek() == scanner.EOF {
- return true
+ return
}
}
@@ -301,7 +301,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
}
p.registerList(a)
p.expectOperandEnd()
- return true
+ return
}
// Register: R1
@@ -321,6 +321,10 @@ func (p *Parser) operand(a *obj.Addr) bool {
a.Reg, _ = p.registerReference(name)
p.get(')')
}
+ } else if p.atRegisterExtension() {
+ p.registerExtension(a, tok.String(), prefix)
+ p.expectOperandEnd()
+ return
} else if r1, r2, scale, ok := p.register(tok.String(), prefix); ok {
if scale != 0 {
p.errorf("expected simple register reference")
@@ -335,7 +339,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
}
// fmt.Printf("REG %s\n", obj.Dconv(&emptyProg, 0, a))
p.expectOperandEnd()
- return true
+ return
}
// Constant.
@@ -348,7 +352,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
tok := p.next()
if tok.ScanToken == scanner.EOF {
p.errorf("missing right parenthesis")
- return false
+ return
}
rname := tok.String()
p.back()
@@ -367,12 +371,12 @@ func (p *Parser) operand(a *obj.Addr) bool {
a.Val = p.floatExpr()
// fmt.Printf("FCONST %s\n", obj.Dconv(&emptyProg, 0, a))
p.expectOperandEnd()
- return true
+ return
}
if p.have(scanner.String) {
if prefix != '$' {
p.errorf("string constant must be an immediate")
- return false
+ return
}
str, err := strconv.Unquote(p.get(scanner.String).String())
if err != nil {
@@ -382,7 +386,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
a.Val = str
// fmt.Printf("SCONST %s\n", obj.Dconv(&emptyProg, 0, a))
p.expectOperandEnd()
- return true
+ return
}
a.Offset = int64(p.expr())
if p.peek() != '(' {
@@ -396,7 +400,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
}
// fmt.Printf("CONST %d %s\n", a.Offset, obj.Dconv(&emptyProg, 0, a))
p.expectOperandEnd()
- return true
+ return
}
// fmt.Printf("offset %d \n", a.Offset)
}
@@ -406,7 +410,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
// fmt.Printf("DONE %s\n", p.arch.Dconv(&emptyProg, 0, a))
p.expectOperandEnd()
- return true
+ return
}
// atStartOfRegister reports whether the parser is at the start of a register definition.
@@ -439,6 +443,20 @@ func (p *Parser) atRegisterShift() bool {
return p.at('(', scanner.Int, ')') && lex.IsRegisterShift(p.input[p.inputPos+3].ScanToken)
}
+// atRegisterExtension reports whether we are at the start of an ARM64 extended register.
+// We have consumed the register or R prefix.
+func (p *Parser) atRegisterExtension() bool {
+ // ARM64 only.
+ if p.arch.Family != sys.ARM64 {
+ return false
+ }
+ // R1.xxx
+ if p.peek() == '.' {
+ return true
+ }
+ return false
+}
+
// registerReference parses a register given either the name, R10, or a parenthesized form, SPR(10).
func (p *Parser) registerReference(name string) (int16, bool) {
r, present := p.arch.Register[name]
@@ -573,6 +591,59 @@ func (p *Parser) registerShift(name string, prefix rune) int64 {
}
}
+// registerExtension parses a register with extension or arrangment.
+// There is known to be a register (current token) and an extension operator (peeked token).
+func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) {
+ if prefix != 0 {
+ p.errorf("prefix %c not allowed for shifted register: $%s", prefix, name)
+ }
+
+ reg, ok := p.registerReference(name)
+ if !ok {
+ p.errorf("unexpected %s in register extension", name)
+ return
+ }
+
+ p.get('.')
+ tok := p.next()
+ ext := tok.String()
+ isIndex := false
+ num := int16(0)
+ isAmount := true // Amount is zero by default
+ if p.peek() == lex.LSH {
+ // parses left shift amount applied after extension: <(SB)(AX), AX // ERROR "invalid instruction"
MOVL (AX)(SP*1), AX // ERROR "invalid instruction"
+ EXTRACTPS $4, X2, (BX) // ERROR "invalid instruction"
+ EXTRACTPS $-1, X2, (BX) // ERROR "invalid instruction"
+ // VSIB addressing does not permit non-vector (X/Y)
+ // scaled index register.
+ VPGATHERDQ X12,(R13)(AX*2), X11 // ERROR "invalid instruction"
+ VPGATHERDQ X2, 664(BX*1), X1 // ERROR "invalid instruction"
+ VPGATHERDQ Y2, (BP)(AX*2), Y1 // ERROR "invalid instruction"
+ VPGATHERDQ Y5, 664(DX*8), Y6 // ERROR "invalid instruction"
+ VPGATHERDQ Y5, (DX), Y0 // ERROR "invalid instruction"
+ // VM/X rejects Y index register.
+ VPGATHERDQ Y5, 664(Y14*8), Y6 // ERROR "invalid instruction"
+ VPGATHERQQ X2, (BP)(Y7*2), X1 // ERROR "invalid instruction"
+ // VM/Y rejects X index register.
+ VPGATHERQQ Y2, (BP)(X7*2), Y1 // ERROR "invalid instruction"
+ VPGATHERDD Y5, -8(X14*8), Y6 // ERROR "invalid instruction"
+ // No VSIB for legacy instructions.
+ MOVL (AX)(X0*1), AX // ERROR "invalid instruction"
+ MOVL (AX)(Y0*1), AX // ERROR "invalid instruction"
+ // AVX2GATHER mask/index/dest #UD cases.
+ VPGATHERQQ Y2, (BP)(X2*2), Y2 // ERROR "mask, index, and destination registers should be distinct"
+ VPGATHERQQ Y2, (BP)(X2*2), Y7 // ERROR "mask, index, and destination registers should be distinct"
+ VPGATHERQQ Y2, (BP)(X7*2), Y2 // ERROR "mask, index, and destination registers should be distinct"
+ VPGATHERQQ Y7, (BP)(X2*2), Y2 // ERROR "mask, index, and destination registers should be distinct"
+ VPGATHERDQ X2, 664(X2*8), X2 // ERROR "mask, index, and destination registers should be distinct"
+ VPGATHERDQ X2, 664(X2*8), X7 // ERROR "mask, index, and destination registers should be distinct"
+ VPGATHERDQ X2, 664(X7*8), X2 // ERROR "mask, index, and destination registers should be distinct"
+ VPGATHERDQ X7, 664(X2*8), X2 // ERROR "mask, index, and destination registers should be distinct"
RET
diff --git a/src/cmd/asm/internal/asm/testdata/arm.s b/src/cmd/asm/internal/asm/testdata/arm.s
index 8f743e7bfa6..bc6cf07e83a 100644
--- a/src/cmd/asm/internal/asm/testdata/arm.s
+++ b/src/cmd/asm/internal/asm/testdata/arm.s
@@ -57,7 +57,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $0
// outcode($1, $2, &$3, 0, &$5);
// }
MOVW.S R1, R2
- MOVW.S $1, R2
+ MOVW $1, R2
MOVW.S R1<0, R8 // 7280a8e6
+ XTAB R2@>8, R8 // 7284a8e6
+ XTAB R2@>16, R8 // 7288a8e6
+ XTAB R2@>24, R8 // 728ca8e6
+ XTAH R3@>0, R9 // 7390b9e6
+ XTAH R3@>8, R9 // 7394b9e6
+ XTAH R3@>16, R9 // 7398b9e6
+ XTAH R3@>24, R9 // 739cb9e6
+ XTABU R4@>0, R7 // 7470e7e6
+ XTABU R4@>8, R7 // 7474e7e6
+ XTABU R4@>16, R7 // 7478e7e6
+ XTABU R4@>24, R7 // 747ce7e6
+ XTAHU R5@>0, R1 // 7510f1e6
+ XTAHU R5@>8, R1 // 7514f1e6
+ XTAHU R5@>16, R1 // 7518f1e6
+ XTAHU R5@>24, R1 // 751cf1e6
+ XTAB R2@>0, R4, R8 // 7280a4e6
+ XTAB R2@>8, R4, R8 // 7284a4e6
+ XTAB R2@>16, R4, R8 // 7288a4e6
+ XTAB R2@>24, R4, R8 // 728ca4e6
+ XTAH R3@>0, R4, R9 // 7390b4e6
+ XTAH R3@>8, R4, R9 // 7394b4e6
+ XTAH R3@>16, R4, R9 // 7398b4e6
+ XTAH R3@>24, R4, R9 // 739cb4e6
+ XTABU R4@>0, R0, R7 // 7470e0e6
+ XTABU R4@>8, R0, R7 // 7474e0e6
+ XTABU R4@>16, R0, R7 // 7478e0e6
+ XTABU R4@>24, R0, R7 // 747ce0e6
+ XTAHU R5@>0, R9, R1 // 7510f9e6
+ XTAHU R5@>8, R9, R1 // 7514f9e6
+ XTAHU R5@>16, R9, R1 // 7518f9e6
+ XTAHU R5@>24, R9, R1 // 751cf9e6
+
// DIVHW R0, R1, R2: R1 / R0 -> R2
DIVHW R0, R1, R2 // 11f012e7
DIVUHW R0, R1, R2 // 11f032e7
@@ -1007,6 +1029,15 @@ jmp_label_3:
SWI $65535 // ffff00ef
SWI // 000000ef
+// BFX/BFXU/BFC/BFI
+ BFX $16, $8, R1, R2 // BFX $16, R1, $8, R2 // 5124afe7
+ BFX $29, $2, R8 // 5881bce7
+ BFXU $16, $8, R1, R2 // BFXU $16, R1, $8, R2 // 5124efe7
+ BFXU $29, $2, R8 // 5881fce7
+ BFC $29, $2, R8 // 1f81dee7
+ BFI $29, $2, R8 // 1881dee7
+ BFI $16, $8, R1, R2 // BFI $16, R1, $8, R2 // 1124d7e7
+
// synthetic arithmatic
ADD $0xffffffaa, R2, R3 // ADD $4294967210, R2, R3 // 55b0e0e30b3082e0
ADD $0xffffff55, R5 // ADD $4294967125, R5 // aab0e0e30b5085e0
@@ -1088,8 +1119,6 @@ jmp_label_3:
// MVN
MVN $0xff, R1 // MVN $255, R1 // ff10e0e3
MVN $0xff000000, R1 // MVN $4278190080, R1 // ff14e0e3
- MVN.S $0xff, R1 // MVN.S $255, R1 // ff10f0e3
- MVN.S $0xff000000, R1 // MVN.S $4278190080, R1 // ff14f0e3
MVN R9<<30, R7 // 097fe0e1
MVN R9>>30, R7 // 297fe0e1
MVN R9->30, R7 // 497fe0e1
@@ -1106,8 +1135,7 @@ jmp_label_3:
MVN.S R9>>R8, R7 // 3978f0e1
MVN.S R9->R8, R7 // 5978f0e1
MVN.S R9@>R8, R7 // 7978f0e1
- MVN $0xffffffae, R5 // MVN $4294967214, R5 // 51b0e0e30b50e0e1
- MVN.S $0xffffffae, R5 // MVN.S $4294967214, R5 // 51b0e0e30b50f0e1
+ MVN $0xffffffbe, R5 // MVN $4294967230, R5 // 4150a0e3
// MOVM
MOVM.IA [R0,R2,R4,R6], (R1) // MOVM.U [R0,R2,R4,R6], (R1) // 550081e8
@@ -1145,11 +1173,23 @@ jmp_label_3:
// MOVW
MOVW R3, R4 // 0340a0e1
+ MOVW.S R3, R4 // 0340b0e1
MOVW R9, R2 // 0920a0e1
+ MOVW.S R9, R2 // 0920b0e1
+ MOVW R5>>1, R2 // a520a0e1
+ MOVW.S R5>>1, R2 // a520b0e1
+ MOVW R5<<1, R2 // 8520a0e1
+ MOVW.S R5<<1, R2 // 8520b0e1
+ MOVW R5->1, R2 // c520a0e1
+ MOVW.S R5->1, R2 // c520b0e1
+ MOVW R5@>1, R2 // e520a0e1
+ MOVW.S R5@>1, R2 // e520b0e1
MOVW $0xff, R9 // MOVW $255, R9 // ff90a0e3
MOVW $0xff000000, R9 // MOVW $4278190080, R9 // ff94a0e3
MOVW $0xff(R0), R1 // MOVW $255(R0), R1 // ff1080e2
+ MOVW.S $0xff(R0), R1 // MOVW.S $255(R0), R1 // ff1090e2
MOVW $-0xff(R0), R1 // MOVW $-255(R0), R1 // ff1040e2
+ MOVW.S $-0xff(R0), R1 // MOVW.S $-255(R0), R1 // ff1050e2
MOVW $0xffffffae, R1 // MOVW $4294967214, R1 // 5110e0e3
MOVW $0xaaaaaaaa, R1 // MOVW $2863311530, R1
MOVW R1, (R2) // 001082e5
@@ -1388,6 +1428,18 @@ jmp_label_3:
MOVB.U R0<<0(R1), R2 // d02011e1
MOVB.W R0<<0(R1), R2 // d020b1e1
MOVB.P R0<<0(R1), R2 // d02091e0
+ MOVBS R2@>0, R8 // 7280afe6
+ MOVBS R2@>8, R8 // 7284afe6
+ MOVBS R2@>16, R8 // 7288afe6
+ MOVBS R2@>24, R8 // 728cafe6
+ MOVB R2@>0, R8 // 7280afe6
+ MOVB R2@>8, R8 // 7284afe6
+ MOVB R2@>16, R8 // 7288afe6
+ MOVB R2@>24, R8 // 728cafe6
+ MOVBU R4@>0, R7 // 7470efe6
+ MOVBU R4@>8, R7 // 7474efe6
+ MOVBU R4@>16, R7 // 7478efe6
+ MOVBU R4@>24, R7 // 747cefe6
// MOVH
MOVH R3, R4 // 0340a0e1
@@ -1490,6 +1542,42 @@ jmp_label_3:
MOVHS math·Exp(SB), R0 // MOVHS math.Exp(SB), R0
MOVHU R0, math·Exp(SB) // MOVHU R0, math.Exp(SB)
MOVHU math·Exp(SB), R0 // MOVHU math.Exp(SB), R0
+ MOVHS R0<<0(R1), R2 // f02091e1
+ MOVHS.U R0<<0(R1), R2 // f02011e1
+ MOVHS.W R0<<0(R1), R2 // f020b1e1
+ MOVHS.P R0<<0(R1), R2 // f02091e0
+ MOVH R0<<0(R1), R2 // f02091e1
+ MOVH.U R0<<0(R1), R2 // f02011e1
+ MOVH.W R0<<0(R1), R2 // f020b1e1
+ MOVH.P R0<<0(R1), R2 // f02091e0
+ MOVHU R0<<0(R1), R2 // b02091e1
+ MOVHU.U R0<<0(R1), R2 // b02011e1
+ MOVHU.W R0<<0(R1), R2 // b020b1e1
+ MOVHU.P R0<<0(R1), R2 // b02091e0
+ MOVHS R2, R5<<0(R1) // b52081e1
+ MOVHS.U R2, R5<<0(R1) // b52001e1
+ MOVHS.W R2, R5<<0(R1) // b520a1e1
+ MOVHS.P R2, R5<<0(R1) // b52081e0
+ MOVH R2, R5<<0(R1) // b52081e1
+ MOVH.U R2, R5<<0(R1) // b52001e1
+ MOVH.W R2, R5<<0(R1) // b520a1e1
+ MOVH.P R2, R5<<0(R1) // b52081e0
+ MOVHU R2, R5<<0(R1) // b52081e1
+ MOVHU.U R2, R5<<0(R1) // b52001e1
+ MOVHU.W R2, R5<<0(R1) // b520a1e1
+ MOVHU.P R2, R5<<0(R1) // b52081e0
+ MOVHS R3@>0, R9 // 7390bfe6
+ MOVHS R3@>8, R9 // 7394bfe6
+ MOVHS R3@>16, R9 // 7398bfe6
+ MOVHS R3@>24, R9 // 739cbfe6
+ MOVH R3@>0, R9 // 7390bfe6
+ MOVH R3@>8, R9 // 7394bfe6
+ MOVH R3@>16, R9 // 7398bfe6
+ MOVH R3@>24, R9 // 739cbfe6
+ MOVHU R5@>0, R1 // 7510ffe6
+ MOVHU R5@>8, R1 // 7514ffe6
+ MOVHU R5@>16, R1 // 7518ffe6
+ MOVHU R5@>24, R1 // 751cffe6
//
// END
diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s
index 1b6dc188c4c..ab6ad5bcb79 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64.s
@@ -29,6 +29,33 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
ADD R1<<22, R2, R3
ADD R1->33, R2, R3
AND R1@>33, R2, R3
+ ADD R1.UXTB, R2, R3 // 4360218b
+ ADD R1.UXTB<<4, R2, R3 // 4370218b
+ VADDP V1.B16, V2.B16, V3.B16 // 43bc214e
+ VADDP V1.S4, V2.S4, V3.S4 // 43bca14e
+ VADDP V1.D2, V2.D2, V3.D2 // 43bce14e
+ VAND V21.B8, V12.B8, V3.B8 // 831d350e
+ VCMEQ V1.H4, V2.H4, V3.H4 // 438c612e
+ VORR V5.B16, V4.B16, V3.B16 // 831ca54e
+ VADD V16.S4, V5.S4, V9.S4 // a984b04e
+ VEOR V0.B16, V1.B16, V0.B16 // 201c206e
+ SHA256H V9.S4, V3, V2 // 6240095e
+ SHA256H2 V9.S4, V4, V3 // 8350095e
+ SHA256SU0 V8.S4, V7.S4 // 0729285e
+ SHA256SU1 V6.S4, V5.S4, V7.S4 // a760065e
+ SHA1SU0 V11.S4, V8.S4, V6.S4 // 06310b5e
+ SHA1SU1 V5.S4, V1.S4 // a118285e
+ SHA1C V1.S4, V2, V3 // 4300015e
+ SHA1H V5, V4 // a408285e
+ SHA1M V8.S4, V7, V6 // e620085e
+ SHA1P V11.S4, V10, V9 // 49110b5e
+ VADDV V0.S4, V0 // 00b8b14e
+ VMOVI $82, V0.B16 // 40e6024f
+ VUADDLV V6.B16, V6 // c638306e
+ VADD V1, V2, V3 // 4384e15e
+ VADD V1, V3, V3 // 6384e15e
+ VSUB V12, V30, V30 // de87ec7e
+ VSUB V12, V20, V30 // 9e86ec7e
// LTYPE1 imsr ',' spreg ','
// {
@@ -84,6 +111,18 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVD $1, ZR
MOVD $1, R1
MOVD ZR, (R1)
+ VLD1 (R8), [V1.B16, V2.B16] // 01a1404c
+ VLD1.P (R3), [V31.H8, V0.H8] // 7fa4df4c
+ VLD1.P (R8)(R20), [V21.B16, V22.B16] // VLD1.P (R8)(R20*1), [V21.B16,V22.B16] // 15a1d44c
+ VLD1.P 64(R1), [V5.B16, V6.B16, V7.B16, V8.B16] // 2520df4c
+ VST1.P [V4.S4, V5.S4], 32(R1) // 24a89f4c
+ VST1 [V0.S4, V1.S4], (R0) // 00a8004c
+ VMOVS V20, (R0) // 140000bd
+ VMOVS.P V20, 4(R0) // 144400bc
+ VMOVS.W V20, 4(R0) // 144c00bc
+ VMOVS (R0), V20 // 140040bd
+ VMOVS.P 8(R0), V20 // 148440bc
+ VMOVS.W 8(R0), V20 // 148c40bc
// small offset fits into instructions
MOVB 1(R1), R2 // 22048039
@@ -147,7 +186,16 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
// outcode($1, &$2, NREG, &$4);
// }
MOVK $1, R1
-
+ VMOV V8.S[1], R1 // 013d0c0e
+ VMOV V0.D[0], R11 // 0b3c084e
+ VMOV V0.D[1], R11 // 0b3c184e
+ VMOV R20, V1.S[0] // 811e044e
+ VMOV R1, V9.H4 // 290c020e
+ VMOV R22, V11.D2 // cb0e084e
+ VMOV V2.B16, V4.B16 // 441ca24e
+ VMOV V20.S[0], V20 // 9406045e
+ VREV32 V5.B16, V5.B16 // a508206e
+ VDUP V19.S[0], V17.S4 // 7106044e
//
// B/BL
//
@@ -193,6 +241,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
CMP R1->11, R2
CMP R1>>22, R2
CMP R1<<33, R2
+ CMP R22.SXTX, RSP // ffe336eb
//
// CBZ
//
@@ -338,6 +387,20 @@ again:
JMP foo(SB)
CALL foo(SB)
+// LDP/STP
+ LDP (R0), (R1, R2)
+ LDP 8(R0), (R1, R2)
+ LDP.W 8(R0), (R1, R2)
+ LDP.P 8(R0), (R1, R2)
+ LDP x(SB), (R1, R2)
+ LDP x+8(SB), (R1, R2)
+ STP (R3, R4), (R5)
+ STP (R3, R4), 8(R5)
+ STP.W (R3, R4), 8(R5)
+ STP.P (R3, R4), 8(R5)
+ STP (R3, R4), x(SB)
+ STP (R3, R4), x+8(SB)
+
// END
//
// LTYPEE comma
diff --git a/src/cmd/asm/internal/asm/testdata/arm64enc.s b/src/cmd/asm/internal/asm/testdata/arm64enc.s
index ec89474990b..b02e0b32ec0 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64enc.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64enc.s
@@ -57,7 +57,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
CALL -1(PC) // ffffff97
CALL (R15) // e0013fd6
JMP (R29) // a0031fd6
- // BRK $35943 // e08c31d4
+ BRK $35943 // e08c31d4
CBNZW R2, -1(PC) // e2ffff35
CBNZ R7, -1(PC) // e7ffffb5
CBZW R15, -1(PC) // efffff34
@@ -81,8 +81,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
CINVW EQ, R2, R17 // 5110825a
CINV VS, R12, R7 // 87718cda
CINV VS, R30, R30 // de739eda
- // CLREX $4 // 5f3403d5
- // CLREX $0 // 5f3003d5
+ CLREX $4 // 5f3403d5
+ CLREX $0 // 5f3003d5
CLSW R15, R6 // e615c05a
CLS R15, ZR // ff15c0da
CLZW R1, R14 // 2e10c05a
@@ -128,9 +128,9 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
CSNEGW HS, R16, R29, R10 // 0a269d5a
CSNEG NE, R21, R18, R11 // ab1692da
//TODO DC
- // DCPS1 $11378 // 418ea5d4
- // DCPS2 $10699 // 6239a5d4
- // DCPS3 $24415 // e3ebabd4
+ DCPS1 $11378 // 418ea5d4
+ DCPS2 $10699 // 6239a5d4
+ DCPS3 $24415 // e3ebabd4
DMB $1 // bf3103d5
DMB $0 // bf3003d5
DRPS // e003bfd6
@@ -145,8 +145,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
EXTR $35, R22, R12, R8 // 888dd693
SEVL // bf2003d5
HINT $6 // df2003d5
- // HLT $65509 // a0fc5fd4
- // HVC $61428 // 82fe1dd4
+ HLT $65509 // a0fc5fd4
+ HVC $61428 // 82fe1dd4
ISB $1 // df3103d5
ISB $15 // df3f03d5
LDARW (R12), R29 // 9dfddf88
@@ -242,15 +242,15 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
ORRW $16252928, ZR, R21 // f5130d32
MOVD $-4260607558625, R11 // eb6b16b2
MOVD R30, R7 // e7031eaa
- // MOVKW $(3905<<0), R21 // MOVKW $3905, R21 // 35e88172
- // MOVKW $(3905<<16), R21 // MOVKW $255918080, R21 // 35e8a172
- // MOVK $(3905<<32), R21 // MOVK $16771847290880, R21 // 35e8c1f2
+ MOVKW $(3905<<0), R21 // MOVKW $3905, R21 // 35e88172
+ MOVKW $(3905<<16), R21 // MOVKW $255918080, R21 // 35e8a172
+ MOVK $(3905<<32), R21 // MOVK $16771847290880, R21 // 35e8c1f2
MOVD $0, R5 // 050080d2
- // MRS $4567, R16 // f03a32d5
- // MRS $32345, R6 // 26cb3fd5
- // MSR R25, $3452 // 99af11d5
- // MSR R25, $16896 // 194018d5
- // MSR $6, DAIFClr // ff4603d5
+ MSR $1, SPSel // bf4100d5
+ MSR $9, DAIFSet // df4903d5
+ MSR $6, DAIFClr // ff4603d5
+ MRS ELR_EL1, R8 // 284038d5
+ MSR R16, ELR_EL1 // 304018d5
MSUBW R1, R1, R12, R5 // 8585011b
MSUB R19, R16, R26, R2 // 42c3139b
MULW R26, R5, R22 // b67c1a1b
@@ -304,7 +304,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
SMNEGL R26, R3, R15 // 6ffc3a9b
SMULH R17, R21, R21 // b57e519b
SMULL R0, R5, R0 // a07c209b
- // SMC $37977 // 238b12d4
+ SMC $37977 // 238b12d4
STLRW R16, (R22) // d0fe9f88
STLR R3, (R24) // 03ff9fc8
//TODO STLRB R11, (R22) // cbfe9f08
@@ -345,10 +345,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
//TODO STTRH 9(R10), R18 // 52990078
//TODO STXP (R20), R18, R5, ZR // 854a3f88
//TODO STXP (R22), R9, R17, R0 // d12620c8
- // STXRW R2, (R19), R18 // 627e1288
- // STXR R15, (R21), R13 // af7e0dc8
- // STXRB R7, (R9), R24 // 277d1808
- // STXRH R12, (R3), R8 // 6c7c0848
+ STXRW R2, (R19), R18 // 627e1288
+ STXR R15, (R21), R13 // af7e0dc8
+ STXRB R7, (R9), R24 // 277d1808
+ STXRH R12, (R3), R8 // 6c7c0848
//TODO SUBW R20.UXTW<<7, R23, R18 // f25e344b
//TODO SUB R5.SXTW<<2, R1, R26 // 3ac825cb
SUB $(1923<<12), R4, R27 // SUB $7876608, R4, R27 // 9b0c5ed1
@@ -398,16 +398,16 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
FCCMPD HI, F11, F15, $15 // ef856b1e
FCCMPES HS, F28, F13, $13 // bd253c1e
FCCMPED LT, F20, F4, $9 // 99b4741e
- // FCMPS F3, F17 // 2022231e
- // FCMPS $(0.0), F8 // 0821201e
- // FCMPD F11, F27 // 60236b1e
- // FCMPD $(0.0), F25 // 2823601e
- // FCMPES F16, F30 // d023301e
- // FCMPES $(0.0), F29 // b823201e
- // FCMPED F13, F10 // 50216d1e
- // FCMPED $(0.0), F25 // 3823601e
- // FCSELS EQ, F26, F27, F25 // 590f3b1e
- // FCSELD PL, F8, F22, F7 // 075d761e
+ FCMPS F3, F17 // 2022231e
+ FCMPS $(0.0), F8 // 0821201e
+ FCMPD F11, F27 // 60236b1e
+ FCMPD $(0.0), F25 // 2823601e
+ FCMPES F16, F30 // d023301e
+ FCMPES $(0.0), F29 // b823201e
+ FCMPED F13, F10 // 50216d1e
+ FCMPED $(0.0), F25 // 3823601e
+ FCSELS EQ, F26, F27, F25 // 590f3b1e
+ FCSELD PL, F8, F22, F7 // 075d761e
//TODO FCVTASW F21, R15 // af02241e
//TODO FCVTAS F20, ZR // 9f02249e
//TODO FCVTASW F6, R11 // cb00641e
diff --git a/src/cmd/asm/internal/asm/testdata/armerror.s b/src/cmd/asm/internal/asm/testdata/armerror.s
index 2959a2f47bc..f2bed8d1c37 100644
--- a/src/cmd/asm/internal/asm/testdata/armerror.s
+++ b/src/cmd/asm/internal/asm/testdata/armerror.s
@@ -35,6 +35,22 @@ TEXT errors(SB),$0
BL 4(R4) // ERROR "non-zero offset"
ADDF F0, R1, F2 // ERROR "illegal combination"
SWI (R0) // ERROR "illegal combination"
+ MULAD F0, F1 // ERROR "illegal combination"
+ MULAF F0, F1 // ERROR "illegal combination"
+ MULSD F0, F1 // ERROR "illegal combination"
+ MULSF F0, F1 // ERROR "illegal combination"
+ NMULAD F0, F1 // ERROR "illegal combination"
+ NMULAF F0, F1 // ERROR "illegal combination"
+ NMULSD F0, F1 // ERROR "illegal combination"
+ NMULSF F0, F1 // ERROR "illegal combination"
+ FMULAD F0, F1 // ERROR "illegal combination"
+ FMULAF F0, F1 // ERROR "illegal combination"
+ FMULSD F0, F1 // ERROR "illegal combination"
+ FMULSF F0, F1 // ERROR "illegal combination"
+ FNMULAD F0, F1 // ERROR "illegal combination"
+ FNMULAF F0, F1 // ERROR "illegal combination"
+ FNMULSD F0, F1 // ERROR "illegal combination"
+ FNMULSF F0, F1 // ERROR "illegal combination"
NEGF F0, F1, F2 // ERROR "illegal combination"
NEGD F0, F1, F2 // ERROR "illegal combination"
ABSF F0, F1, F2 // ERROR "illegal combination"
@@ -63,6 +79,8 @@ TEXT errors(SB),$0
MOVW errors(SB), F0 // ERROR "illegal combination"
MOVW $20, errors(SB) // ERROR "illegal combination"
MOVW errors(SB), $20 // ERROR "illegal combination"
+ MOVW (R1), [R0-R4] // ERROR "illegal combination"
+ MOVW [R0-R4], (R1) // ERROR "illegal combination"
MOVB $245, R1 // ERROR "illegal combination"
MOVH $245, R1 // ERROR "illegal combination"
MOVB $0xff000000, R1 // ERROR "illegal combination"
@@ -85,10 +103,10 @@ TEXT errors(SB),$0
MOVH $0xffffff00, CPSR // ERROR "illegal combination"
MOVB $0xfffffff0, FPSR // ERROR "illegal combination"
MOVH $0xfffffff0, FPSR // ERROR "illegal combination"
- MOVB.IA 4(R1), [R0-R4] // ERROR "illegal combination"
- MOVB.DA 4(R1), [R0-R4] // ERROR "illegal combination"
- MOVH.IA 4(R1), [R0-R4] // ERROR "illegal combination"
- MOVH.DA 4(R1), [R0-R4] // ERROR "illegal combination"
+ MOVB (R1), [R0-R4] // ERROR "illegal combination"
+ MOVB [R0-R4], (R1) // ERROR "illegal combination"
+ MOVH (R1), [R0-R4] // ERROR "illegal combination"
+ MOVH [R0-R4], (R1) // ERROR "illegal combination"
MOVB $0xff(R0), R1 // ERROR "illegal combination"
MOVH $0xff(R0), R1 // ERROR "illegal combination"
MOVB $errors(SB), R2 // ERROR "illegal combination"
@@ -124,5 +142,123 @@ TEXT errors(SB),$0
MOVFW CPSR, R2 // ERROR "illegal combination"
MOVDW R1, CPSR // ERROR "illegal combination"
MOVFW R1, CPSR // ERROR "illegal combination"
+ BFX $12, $41, R2, R3 // ERROR "wrong width or LSB"
+ BFX $12, $-2, R2 // ERROR "wrong width or LSB"
+ BFXU $40, $4, R2, R3 // ERROR "wrong width or LSB"
+ BFXU $-40, $4, R2 // ERROR "wrong width or LSB"
+ BFX $-2, $4, R2, R3 // ERROR "wrong width or LSB"
+ BFXU $4, R2, R5, R2 // ERROR "missing or wrong LSB"
+ BFXU $4, R2, R5 // ERROR "missing or wrong LSB"
+ BFC $12, $8, R2, R3 // ERROR "illegal combination"
+ MOVB R0>>8, R2 // ERROR "illegal shift"
+ MOVH R0<<16, R2 // ERROR "illegal shift"
+ MOVBS R0->8, R2 // ERROR "illegal shift"
+ MOVHS R0<<24, R2 // ERROR "illegal shift"
+ MOVBU R0->24, R2 // ERROR "illegal shift"
+ MOVHU R0@>1, R2 // ERROR "illegal shift"
+ XTAB R0>>8, R2 // ERROR "illegal shift"
+ XTAH R0<<16, R2 // ERROR "illegal shift"
+ XTABU R0->24, R2 // ERROR "illegal shift"
+ XTAHU R0@>1, R2 // ERROR "illegal shift"
+ XTAB R0>>8, R5, R2 // ERROR "illegal shift"
+ XTAH R0<<16, R5, R2 // ERROR "illegal shift"
+ XTABU R0->24, R5, R2 // ERROR "illegal shift"
+ XTAHU R0@>1, R5, R2 // ERROR "illegal shift"
+ AND.W R0, R1 // ERROR "invalid .W suffix"
+ ORR.P R2, R3, R4 // ERROR "invalid .P suffix"
+ CMP.S R1, R2 // ERROR "invalid .S suffix"
+ BIC.P $124, R1, R2 // ERROR "invalid .P suffix"
+ MOVW.S $124, R1 // ERROR "invalid .S suffix"
+ MVN.S $123, g // ERROR "invalid .S suffix"
+ RSB.U $0, R9 // ERROR "invalid .U suffix"
+ CMP.S $29, g // ERROR "invalid .S suffix"
+ ADD.W R1<R2, R1 // ERROR "invalid .S suffix"
+ SLL.P R1, R2, R3 // ERROR "invalid .P suffix"
+ SRA.U R2, R8 // ERROR "invalid .U suffix"
+ SWI.S // ERROR "invalid .S suffix"
+ SWI.P $0 // ERROR "invalid .P suffix"
+ MOVW.S $0xaaaaaaaa, R7 // ERROR "invalid .S suffix"
+ MOVW.P $0xffffff44, R1 // ERROR "invalid .P suffix"
+ MOVW.S $0xffffff77, R1 // ERROR "invalid .S suffix"
+ MVN.S $0xffffffaa, R8 // ERROR "invalid .S suffix"
+ MVN.S $0xaaaaaaaa, R8 // ERROR "invalid .S suffix"
+ ADD.U $0xaaaaaaaa, R4 // ERROR "invalid .U suffix"
+ ORR.P $0x555555, R7, R3 // ERROR "invalid .P suffix"
+ TST.S $0xabcd1234, R2 // ERROR "invalid .S suffix"
+ MOVB.S R1, R2 // ERROR "invalid .S suffix"
+ MOVBU.P R1, R2 // ERROR "invalid .P suffix"
+ MOVBS.U R1, R2 // ERROR "invalid .U suffix"
+ MOVH.S R1, R2 // ERROR "invalid .S suffix"
+ MOVHU.P R1, R2 // ERROR "invalid .P suffix"
+ MOVHS.U R1, R2 // ERROR "invalid .U suffix"
+ MUL.P R0, R1, R2 // ERROR "invalid .P suffix"
+ MULU.W R1, R2 // ERROR "invalid .W suffix"
+ DIVHW.S R0, R1, R2 // ERROR "invalid .S suffix"
+ DIVHW.W R1, R2 // ERROR "invalid .W suffix"
+ MULL.W R2, R0, (R5, R8) // ERROR "invalid .W suffix"
+ MULLU.U R2, R0, (R5, R8) // ERROR "invalid .U suffix"
+ BFX.S $2, $4, R3 // ERROR "invalid .S suffix"
+ BFXU.W $2, $4, R3, R0 // ERROR "invalid .W suffix"
+ MOVB.S R1, 4(R2) // ERROR "invalid .S suffix"
+ MOVHU.S R1, 4(R2) // ERROR "invalid .S suffix"
+ MOVW.S R1, 4(R2) // ERROR "invalid .S suffix"
+ MOVBU.S 4(R2), R3 // ERROR "invalid .S suffix"
+ MOVH.S 4(R2), R3 // ERROR "invalid .S suffix"
+ MOVW.S 4(R2), R3 // ERROR "invalid .S suffix"
+ XTAB.S R0@>0, R2 // ERROR "invalid .S suffix"
+ XTAB.W R0@>8, R2, R9 // ERROR "invalid .W suffix"
+ MOVBU.S R0@>24, R1 // ERROR "invalid .S suffix"
+ MOVHS.S R0@>16, R1 // ERROR "invalid .S suffix"
+ MOVB.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix"
+ MOVHU.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix"
+ MOVW.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix"
+ MOVBU.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix"
+ MOVH.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix"
+ MOVW.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix"
+ MOVW.S CPSR, R1 // ERROR "invalid .S suffix"
+ MOVW.S R3, CPSR // ERROR "invalid .S suffix"
+ MOVW.S $0, CPSR // ERROR "invalid .S suffix"
+ MOVM.S (R0), [R2-R4] // ERROR "invalid .S suffix"
+ MOVM.S [R1-R6], (R9) // ERROR "invalid .S suffix"
+ SWPW.S R1, (R2), R3 // ERROR "invalid .S suffix"
+ MOVF.S (R0), F1 // ERROR "invalid .S suffix"
+ MOVF.S F9, (R4) // ERROR "invalid .S suffix"
+ MOVF.S 0xfff0(R0), F1 // ERROR "invalid .S suffix"
+ MOVF.S F9, 0xfff0(R4) // ERROR "invalid .S suffix"
+ ADDF.S F1, F2, F3 // ERROR "invalid .S suffix"
+ SUBD.U F1, F2 // ERROR "invalid .U suffix"
+ NEGF.W F9, F10 // ERROR "invalid .W suffix"
+ ABSD.P F9, F10 // ERROR "invalid .P suffix"
+ MOVW.S FPSR, R0 // ERROR "invalid .S suffix"
+ MOVW.P g, FPSR // ERROR "invalid .P suffix"
+ MOVW.S R1->4(R6), R2 // ERROR "invalid .S suffix"
+ MOVB.S R9, R2<<8(R4) // ERROR "invalid .S suffix"
+ MOVHU.S R9, R2<<0(R4) // ERROR "invalid .S suffix"
+ STREX.S R0, (R1), R2 // ERROR "invalid .S suffix"
+ LDREX.S (R2), R8 // ERROR "invalid .S suffix"
+ MOVF.S $0.0, F3 // ERROR "invalid .S suffix"
+ CMPF.S F1, F2 // ERROR "invalid .S suffix"
+ MOVFW.S F0, F9 // ERROR "invalid .S suffix"
+ MOVWF.W F3, F1 // ERROR "invalid .W suffix"
+ MOVFW.P F0, R9 // ERROR "invalid .P suffix"
+ MOVWF.W R3, F1 // ERROR "invalid .W suffix"
+ MOVW.S F0, R9 // ERROR "invalid .S suffix"
+ MOVW.U R3, F1 // ERROR "invalid .U suffix"
+ PLD.S 4(R1) // ERROR "invalid .S suffix"
+ CLZ.S R1, R2 // ERROR "invalid .S suffix"
+ MULBB.S R0, R1, R2 // ERROR "invalid .S suffix"
+ MULA.W R9, R6, R1, g // ERROR "invalid .W suffix"
+ MULS.S R2, R3, R4, g // ERROR "invalid .S suffix"
+
+ STREX R1, (R0) // ERROR "illegal combination"
+ STREX (R1), R0 // ERROR "illegal combination"
+ STREX R1, (R0), R1 // ERROR "cannot use same register as both source and destination"
+ STREX R1, (R0), R0 // ERROR "cannot use same register as both source and destination"
+ STREXD R0, (R2), R0 // ERROR "cannot use same register as both source and destination"
+ STREXD R0, (R2), R1 // ERROR "cannot use same register as both source and destination"
+ STREXD R0, (R2), R2 // ERROR "cannot use same register as both source and destination"
+ STREXD R1, (R4), R7 // ERROR "must be even"
END
diff --git a/src/cmd/asm/internal/asm/testdata/armv6.s b/src/cmd/asm/internal/asm/testdata/armv6.s
index cc79275f690..c6649bc1fb4 100644
--- a/src/cmd/asm/internal/asm/testdata/armv6.s
+++ b/src/cmd/asm/internal/asm/testdata/armv6.s
@@ -18,6 +18,26 @@ TEXT foo(SB), DUPOK|NOSPLIT, $0
MULD.EQ F3, F4, F5 // 035b240e
MULF.NE F0, F2 // 002a221e
MULD F3, F5 // 035b25ee
+ NMULF F0, F1, F2 // 402a21ee
+ NMULF F3, F7 // 437a27ee
+ NMULD F0, F1, F2 // 402b21ee
+ NMULD F3, F7 // 437b27ee
+ MULAF F5, F6, F7 // 057a06ee
+ MULAD F5, F6, F7 // 057b06ee
+ MULSF F5, F6, F7 // 457a06ee
+ MULSD F5, F6, F7 // 457b06ee
+ NMULAF F5, F6, F7 // 057a16ee
+ NMULAD F5, F6, F7 // 057b16ee
+ NMULSF F5, F6, F7 // 457a16ee
+ NMULSD F5, F6, F7 // 457b16ee
+ FMULAF F5, F6, F7 // 057aa6ee
+ FMULAD F5, F6, F7 // 057ba6ee
+ FMULSF F5, F6, F7 // 457aa6ee
+ FMULSD F5, F6, F7 // 457ba6ee
+ FNMULAF F5, F6, F7 // 457a96ee
+ FNMULAD F5, F6, F7 // 457b96ee
+ FNMULSF F5, F6, F7 // 057a96ee
+ FNMULSD F5, F6, F7 // 057b96ee
DIVF F0, F1, F2 // 002a81ee
DIVD.EQ F3, F4, F5 // 035b840e
DIVF.NE F0, F2 // 002a821e
diff --git a/src/cmd/asm/internal/asm/testdata/ppc64.s b/src/cmd/asm/internal/asm/testdata/ppc64.s
index 30fb0f2c02b..2909c390945 100644
--- a/src/cmd/asm/internal/asm/testdata/ppc64.s
+++ b/src/cmd/asm/internal/asm/testdata/ppc64.s
@@ -550,6 +550,14 @@ label1:
// ftsqrt BF, FRB
FTSQRT F2,$7
+// FCFID
+// FCFIDS
+
+ FCFID F2,F3
+ FCFIDCC F3,F3
+ FCFIDS F2,F3
+ FCFIDSCC F2,F3
+
//
// CMP
//
@@ -581,6 +589,10 @@ label1:
// cmpb RA,RS,RB
CMPB R2,R2,R1
+// CMPEQB RA,RB,BF produces
+// cmpeqb BF,RA,RB
+ CMPEQB R1, R2, CR0
+
//
// rotate extended mnemonics map onto other shift instructions
//
@@ -706,6 +718,14 @@ label1:
// }
DCBF (R1)
DCBF (R1+R2) // DCBF (R1)(R2*1)
+ DCBF (R1), $1
+ DCBF (R1)(R2*1), $1
+ DCBT (R1), $1
+ DCBT (R1)(R2*1), $1
+
+// LDMX (RB)(RA*1),RT produces
+// ldmx RT,RA,RB
+ LDMX (R2)(R1*1), R3
// Population count, X-form
// RS,RA produces
@@ -714,6 +734,20 @@ label1:
POPCNTW R1,R2
POPCNTB R1,R2
+// Copysign
+ FCPSGN F1,F2,F3
+
+// Random number generator, X-form
+// DARN L,RT produces
+// darn RT,L
+ DARN $1, R1
+
+// Copy/Paste facility
+// RB,RA produces
+// RA,RB
+ COPY R2,R1
+ PASTECC R2,R1
+
// VMX instructions
// Described as:
@@ -788,6 +822,11 @@ label1:
VPMSUMW V2, V3, V1
VPMSUMD V2, V3, V1
+// Vector multiply-sum, VA-form
+// VRA, VRB, VRC, VRT produces
+// VRT, VRA, VRB, VRC
+ VMSUMUDM V4, V3, V2, V1
+
// Vector SUB, VX-form
// VRA,VRB,VRT produces
// VRT,VRA,VRB
@@ -885,12 +924,20 @@ label1:
VCMPGTSWCC V3, V2, V1
VCMPGTSD V3, V2, V1
VCMPGTSDCC V3, V2, V1
+ VCMPNEZB V3, V2, V1
+ VCMPNEZBCC V3, V2, V1
// Vector permute, VA-form
// VRA,VRB,VRC,VRT produces
// VRT,VRA,VRB,VRC
VPERM V3, V2, V1, V0
+// Vector bit permute, VX-form
+// VRA,VRB,VRT produces
+// VRT,VRA,VRB
+ VBPERMQ V3,V1,V2
+ VBPERMD V3,V1,V2
+
// Vector select, VA-form
// VRA,VRB,VRC,VRT produces
// VRT,VRA,VRB,VRC
@@ -958,6 +1005,7 @@ label1:
// RA,XS
MFVSRD VS0, R1
MFVSRWZ VS33, R1
+ MFVSRLD VS63, R1
// VSX move to VSR, XX1-form
// RA,XT produces
@@ -965,6 +1013,8 @@ label1:
MTVSRD R1, VS0
MTVSRWA R1, VS31
MTVSRWZ R1, VS63
+ MTVSRDD R1, R2, VS0
+ MTVSRWS R1, VS32
// VSX AND, XX3-form
// XA,XB,XT produces
@@ -1062,6 +1112,17 @@ label1:
XVCVUXDSP VS0,VS32
XVCVUXWSP VS0,VS32
+// Multiply-Add High Doubleword
+// RA,RB,RC,RT produces
+// RT,RA,RB,RC
+ MADDHD R1,R2,R3,R4
+ MADDHDU R1,R2,R3,R4
+
+// Add Extended using alternate carry bit
+// ADDEX RA,RB,CY,RT produces
+// addex RT, RA, RB, CY
+ ADDEX R1, R2, $0, R3
+
//
// NOP
//
diff --git a/src/cmd/asm/internal/asm/testdata/s390x.s b/src/cmd/asm/internal/asm/testdata/s390x.s
index 6cc129ccc51..884f6b23cf8 100644
--- a/src/cmd/asm/internal/asm/testdata/s390x.s
+++ b/src/cmd/asm/internal/asm/testdata/s390x.s
@@ -213,6 +213,11 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16-
CMPWU R1, R2 // 1512
CMPWU R3, $4294967295 // c23fffffffff
+ TMHH R1, $65535 // a712ffff
+ TMHL R2, $1 // a7230001
+ TMLH R3, $0 // a7300000
+ TMLL R4, $32768 // a7418000
+
BNE 0(PC) // a7740000
BEQ 0(PC) // a7840000
BLT 0(PC) // a7440000
@@ -296,6 +301,9 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16-
FMADDS F1, F2, F3 // b30e3012
FMSUB F4, F5, F5 // b31f5045
FMSUBS F6, F6, F7 // b30f7066
+ LPDFR F1, F2 // b3700021
+ LNDFR F3, F4 // b3710043
+ CPSDR F5, F6, F7 // b3725076
VL (R15), V1 // e710f0000006
VST V1, (R15) // e710f000000e
diff --git a/src/cmd/asm/internal/flags/flags.go b/src/cmd/asm/internal/flags/flags.go
index bd90b82bf6a..6acde294326 100644
--- a/src/cmd/asm/internal/flags/flags.go
+++ b/src/cmd/asm/internal/flags/flags.go
@@ -6,6 +6,7 @@
package flags
import (
+ "cmd/internal/objabi"
"flag"
"fmt"
"os"
@@ -31,6 +32,7 @@ var (
func init() {
flag.Var(&D, "D", "predefined symbol with optional simple value -D=identifier=value; can be set multiple times")
flag.Var(&I, "I", "include directory; can be set multiple times")
+ objabi.AddVersionFlag() // -V
}
// MultiFlag allows setting a value multiple times to collect a list, as in -I=dir1 -I=dir2.
diff --git a/src/cmd/asm/internal/lex/input.go b/src/cmd/asm/internal/lex/input.go
index ddfcddf36d8..666611e1799 100644
--- a/src/cmd/asm/internal/lex/input.go
+++ b/src/cmd/asm/internal/lex/input.go
@@ -13,6 +13,7 @@ import (
"text/scanner"
"cmd/asm/internal/flags"
+ "cmd/internal/objabi"
"cmd/internal/src"
)
@@ -454,7 +455,7 @@ func (in *Input) line() {
in.Error("unexpected token at end of #line: ", tok)
}
pos := src.MakePos(in.Base(), uint(in.Line()), uint(in.Col()))
- in.Stack.SetBase(src.NewLinePragmaBase(pos, file, uint(line)))
+ in.Stack.SetBase(src.NewLinePragmaBase(pos, file, objabi.AbsFile(objabi.WorkingDir(), file, *flags.TrimPath), uint(line)))
}
// #undef processing
diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go
index 2e799163af4..04f56f96467 100644
--- a/src/cmd/asm/main.go
+++ b/src/cmd/asm/main.go
@@ -72,7 +72,7 @@ func main() {
break
}
// reports errors to parser.Errorf
- obj.Flushplist(ctxt, pList, nil)
+ obj.Flushplist(ctxt, pList, nil, "")
}
if ok {
obj.WriteObjFile(ctxt, buf)
diff --git a/src/cmd/buildid/buildid.go b/src/cmd/buildid/buildid.go
new file mode 100644
index 00000000000..8d810ffdd99
--- /dev/null
+++ b/src/cmd/buildid/buildid.go
@@ -0,0 +1,73 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+
+ "cmd/internal/buildid"
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: go tool buildid [-w] file\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+var wflag = flag.Bool("w", false, "write build ID")
+
+func main() {
+ log.SetPrefix("buildid: ")
+ log.SetFlags(0)
+ flag.Usage = usage
+ flag.Parse()
+ if flag.NArg() != 1 {
+ usage()
+ }
+
+ file := flag.Arg(0)
+ id, err := buildid.ReadFile(file)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if !*wflag {
+ fmt.Printf("%s\n", id)
+ return
+ }
+
+ f, err := os.Open(file)
+ if err != nil {
+ log.Fatal(err)
+ }
+ matches, hash, err := buildid.FindAndHash(f, id, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+ f.Close()
+
+ tail := id
+ if i := strings.LastIndex(id, "."); i >= 0 {
+ tail = tail[i+1:]
+ }
+ if len(tail) != len(hash)*2 {
+ log.Fatalf("%s: cannot find %d-byte hash in id %s", file, len(hash), id)
+ }
+ newID := id[:len(id)-len(tail)] + fmt.Sprintf("%x", hash)
+
+ f, err = os.OpenFile(file, os.O_WRONLY, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := buildid.Rewrite(f, matches, newID); err != nil {
+ log.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/src/cmd/buildid/doc.go b/src/cmd/buildid/doc.go
new file mode 100644
index 00000000000..d1ec155c976
--- /dev/null
+++ b/src/cmd/buildid/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Buildid displays or updates the build ID stored in a Go package or binary.
+
+Usage:
+ go tool buildid [-w] file
+
+By default, buildid prints the build ID found in the named file.
+If the -w option is given, buildid rewrites the build ID found in
+the file to accurately record a content hash of the file.
+
+This tool is only intended for use by the go command or
+other build systems.
+*/
+package main
diff --git a/src/cmd/cgo/ast.go b/src/cmd/cgo/ast.go
index 7122a9dbbeb..58e0ee78cb7 100644
--- a/src/cmd/cgo/ast.go
+++ b/src/cmd/cgo/ast.go
@@ -58,11 +58,14 @@ func (f *File) ParseGo(name string, src []byte) {
// so we use ast1 to look for the doc comments on import "C"
// and on exported functions, and we use ast2 for translating
// and reprinting.
+ // In cgo mode, we ignore ast2 and just apply edits directly to
+ // the text behind ast1. In godefs mode we modify and print ast2.
ast1 := parse(name, src, parser.ParseComments)
ast2 := parse(name, src, 0)
f.Package = ast1.Name.Name
f.Name = make(map[string]*Name)
+ f.NamePos = make(map[*Name]token.Pos)
// In ast1, find the import "C" line and get any extra C preamble.
sawC := false
@@ -96,36 +99,53 @@ func (f *File) ParseGo(name string, src []byte) {
}
// In ast2, strip the import "C" line.
- w := 0
- for _, decl := range ast2.Decls {
- d, ok := decl.(*ast.GenDecl)
- if !ok {
- ast2.Decls[w] = decl
+ if *godefs {
+ w := 0
+ for _, decl := range ast2.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok {
+ ast2.Decls[w] = decl
+ w++
+ continue
+ }
+ ws := 0
+ for _, spec := range d.Specs {
+ s, ok := spec.(*ast.ImportSpec)
+ if !ok || s.Path.Value != `"C"` {
+ d.Specs[ws] = spec
+ ws++
+ }
+ }
+ if ws == 0 {
+ continue
+ }
+ d.Specs = d.Specs[0:ws]
+ ast2.Decls[w] = d
w++
- continue
}
- ws := 0
- for _, spec := range d.Specs {
- s, ok := spec.(*ast.ImportSpec)
- if !ok || s.Path.Value != `"C"` {
- d.Specs[ws] = spec
- ws++
+ ast2.Decls = ast2.Decls[0:w]
+ } else {
+ for _, decl := range ast2.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ for _, spec := range d.Specs {
+ if s, ok := spec.(*ast.ImportSpec); ok && s.Path.Value == `"C"` {
+ // Replace "C" with _ "unsafe", to keep program valid.
+ // (Deleting import statement or clause is not safe if it is followed
+ // in the source by an explicit semicolon.)
+ f.Edit.Replace(f.offset(s.Path.Pos()), f.offset(s.Path.End()), `_ "unsafe"`)
+ }
}
}
- if ws == 0 {
- continue
- }
- d.Specs = d.Specs[0:ws]
- ast2.Decls[w] = d
- w++
}
- ast2.Decls = ast2.Decls[0:w]
// Accumulate pointers to uses of C.x.
if f.Ref == nil {
f.Ref = make([]*Ref, 0, 8)
}
- f.walk(ast2, "prog", (*File).saveExprs)
+ f.walk(ast2, ctxProg, (*File).saveExprs)
// Accumulate exported functions.
// The comments are only on ast1 but we need to
@@ -133,8 +153,8 @@ func (f *File) ParseGo(name string, src []byte) {
// The first walk fills in ExpFunc, and the
// second walk changes the entries to
// refer to ast2 instead.
- f.walk(ast1, "prog", (*File).saveExport)
- f.walk(ast2, "prog", (*File).saveExport2)
+ f.walk(ast1, ctxProg, (*File).saveExport)
+ f.walk(ast2, ctxProg, (*File).saveExport2)
f.Comments = ast1.Comments
f.AST = ast2
@@ -143,9 +163,6 @@ func (f *File) ParseGo(name string, src []byte) {
// Like ast.CommentGroup's Text method but preserves
// leading blank lines, so that line numbers line up.
func commentText(g *ast.CommentGroup) string {
- if g == nil {
- return ""
- }
var pieces []string
for _, com := range g.List {
c := com.Text
@@ -165,7 +182,7 @@ func commentText(g *ast.CommentGroup) string {
}
// Save various references we are going to need later.
-func (f *File) saveExprs(x interface{}, context string) {
+func (f *File) saveExprs(x interface{}, context astContext) {
switch x := x.(type) {
case *ast.Expr:
switch (*x).(type) {
@@ -178,7 +195,7 @@ func (f *File) saveExprs(x interface{}, context string) {
}
// Save references to C.xxx for later processing.
-func (f *File) saveRef(n *ast.Expr, context string) {
+func (f *File) saveRef(n *ast.Expr, context astContext) {
sel := (*n).(*ast.SelectorExpr)
// For now, assume that the only instance of capital C is when
// used as the imported package identifier.
@@ -188,10 +205,10 @@ func (f *File) saveRef(n *ast.Expr, context string) {
if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" {
return
}
- if context == "as2" {
- context = "expr"
+ if context == ctxAssign2 {
+ context = ctxExpr
}
- if context == "embed-type" {
+ if context == ctxEmbedType {
error_(sel.Pos(), "cannot embed C type")
}
goname := sel.Sel.Name
@@ -212,6 +229,7 @@ func (f *File) saveRef(n *ast.Expr, context string) {
Go: goname,
}
f.Name[goname] = name
+ f.NamePos[name] = sel.Pos()
}
f.Ref = append(f.Ref, &Ref{
Name: name,
@@ -221,7 +239,7 @@ func (f *File) saveRef(n *ast.Expr, context string) {
}
// Save calls to C.xxx for later processing.
-func (f *File) saveCall(call *ast.CallExpr, context string) {
+func (f *File) saveCall(call *ast.CallExpr, context astContext) {
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return
@@ -229,12 +247,12 @@ func (f *File) saveCall(call *ast.CallExpr, context string) {
if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" {
return
}
- c := &Call{Call: call, Deferred: context == "defer"}
+ c := &Call{Call: call, Deferred: context == ctxDefer}
f.Calls = append(f.Calls, c)
}
// If a function should be exported add it to ExpFunc.
-func (f *File) saveExport(x interface{}, context string) {
+func (f *File) saveExport(x interface{}, context astContext) {
n, ok := x.(*ast.FuncDecl)
if !ok {
return
@@ -274,7 +292,7 @@ func (f *File) saveExport(x interface{}, context string) {
}
// Make f.ExpFunc[i] point at the Func from this AST instead of the other one.
-func (f *File) saveExport2(x interface{}, context string) {
+func (f *File) saveExport2(x interface{}, context astContext) {
n, ok := x.(*ast.FuncDecl)
if !ok {
return
@@ -288,8 +306,30 @@ func (f *File) saveExport2(x interface{}, context string) {
}
}
+type astContext int
+
+const (
+ ctxProg astContext = iota
+ ctxEmbedType
+ ctxType
+ ctxStmt
+ ctxExpr
+ ctxField
+ ctxParam
+ ctxAssign2 // assignment of a single expression to two variables
+ ctxSwitch
+ ctxTypeSwitch
+ ctxFile
+ ctxDecl
+ ctxSpec
+ ctxDefer
+ ctxCall // any function call other than ctxCall2
+ ctxCall2 // function call whose result is assigned to two variables
+ ctxSelector
+)
+
// walk walks the AST x, calling visit(f, x, context) for each node.
-func (f *File) walk(x interface{}, context string, visit func(*File, interface{}, string)) {
+func (f *File) walk(x interface{}, context astContext, visit func(*File, interface{}, astContext)) {
visit(f, x, context)
switch n := x.(type) {
case *ast.Expr:
@@ -304,10 +344,10 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{}
// These are ordered and grouped to match ../../go/ast/ast.go
case *ast.Field:
- if len(n.Names) == 0 && context == "field" {
- f.walk(&n.Type, "embed-type", visit)
+ if len(n.Names) == 0 && context == ctxField {
+ f.walk(&n.Type, ctxEmbedType, visit)
} else {
- f.walk(&n.Type, "type", visit)
+ f.walk(&n.Type, ctxType, visit)
}
case *ast.FieldList:
for _, field := range n.List {
@@ -318,163 +358,163 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{}
case *ast.Ellipsis:
case *ast.BasicLit:
case *ast.FuncLit:
- f.walk(n.Type, "type", visit)
- f.walk(n.Body, "stmt", visit)
+ f.walk(n.Type, ctxType, visit)
+ f.walk(n.Body, ctxStmt, visit)
case *ast.CompositeLit:
- f.walk(&n.Type, "type", visit)
- f.walk(n.Elts, "expr", visit)
+ f.walk(&n.Type, ctxType, visit)
+ f.walk(n.Elts, ctxExpr, visit)
case *ast.ParenExpr:
f.walk(&n.X, context, visit)
case *ast.SelectorExpr:
- f.walk(&n.X, "selector", visit)
+ f.walk(&n.X, ctxSelector, visit)
case *ast.IndexExpr:
- f.walk(&n.X, "expr", visit)
- f.walk(&n.Index, "expr", visit)
+ f.walk(&n.X, ctxExpr, visit)
+ f.walk(&n.Index, ctxExpr, visit)
case *ast.SliceExpr:
- f.walk(&n.X, "expr", visit)
+ f.walk(&n.X, ctxExpr, visit)
if n.Low != nil {
- f.walk(&n.Low, "expr", visit)
+ f.walk(&n.Low, ctxExpr, visit)
}
if n.High != nil {
- f.walk(&n.High, "expr", visit)
+ f.walk(&n.High, ctxExpr, visit)
}
if n.Max != nil {
- f.walk(&n.Max, "expr", visit)
+ f.walk(&n.Max, ctxExpr, visit)
}
case *ast.TypeAssertExpr:
- f.walk(&n.X, "expr", visit)
- f.walk(&n.Type, "type", visit)
+ f.walk(&n.X, ctxExpr, visit)
+ f.walk(&n.Type, ctxType, visit)
case *ast.CallExpr:
- if context == "as2" {
- f.walk(&n.Fun, "call2", visit)
+ if context == ctxAssign2 {
+ f.walk(&n.Fun, ctxCall2, visit)
} else {
- f.walk(&n.Fun, "call", visit)
+ f.walk(&n.Fun, ctxCall, visit)
}
- f.walk(n.Args, "expr", visit)
+ f.walk(n.Args, ctxExpr, visit)
case *ast.StarExpr:
f.walk(&n.X, context, visit)
case *ast.UnaryExpr:
- f.walk(&n.X, "expr", visit)
+ f.walk(&n.X, ctxExpr, visit)
case *ast.BinaryExpr:
- f.walk(&n.X, "expr", visit)
- f.walk(&n.Y, "expr", visit)
+ f.walk(&n.X, ctxExpr, visit)
+ f.walk(&n.Y, ctxExpr, visit)
case *ast.KeyValueExpr:
- f.walk(&n.Key, "expr", visit)
- f.walk(&n.Value, "expr", visit)
+ f.walk(&n.Key, ctxExpr, visit)
+ f.walk(&n.Value, ctxExpr, visit)
case *ast.ArrayType:
- f.walk(&n.Len, "expr", visit)
- f.walk(&n.Elt, "type", visit)
+ f.walk(&n.Len, ctxExpr, visit)
+ f.walk(&n.Elt, ctxType, visit)
case *ast.StructType:
- f.walk(n.Fields, "field", visit)
+ f.walk(n.Fields, ctxField, visit)
case *ast.FuncType:
- f.walk(n.Params, "param", visit)
+ f.walk(n.Params, ctxParam, visit)
if n.Results != nil {
- f.walk(n.Results, "param", visit)
+ f.walk(n.Results, ctxParam, visit)
}
case *ast.InterfaceType:
- f.walk(n.Methods, "field", visit)
+ f.walk(n.Methods, ctxField, visit)
case *ast.MapType:
- f.walk(&n.Key, "type", visit)
- f.walk(&n.Value, "type", visit)
+ f.walk(&n.Key, ctxType, visit)
+ f.walk(&n.Value, ctxType, visit)
case *ast.ChanType:
- f.walk(&n.Value, "type", visit)
+ f.walk(&n.Value, ctxType, visit)
case *ast.BadStmt:
case *ast.DeclStmt:
- f.walk(n.Decl, "decl", visit)
+ f.walk(n.Decl, ctxDecl, visit)
case *ast.EmptyStmt:
case *ast.LabeledStmt:
- f.walk(n.Stmt, "stmt", visit)
+ f.walk(n.Stmt, ctxStmt, visit)
case *ast.ExprStmt:
- f.walk(&n.X, "expr", visit)
+ f.walk(&n.X, ctxExpr, visit)
case *ast.SendStmt:
- f.walk(&n.Chan, "expr", visit)
- f.walk(&n.Value, "expr", visit)
+ f.walk(&n.Chan, ctxExpr, visit)
+ f.walk(&n.Value, ctxExpr, visit)
case *ast.IncDecStmt:
- f.walk(&n.X, "expr", visit)
+ f.walk(&n.X, ctxExpr, visit)
case *ast.AssignStmt:
- f.walk(n.Lhs, "expr", visit)
+ f.walk(n.Lhs, ctxExpr, visit)
if len(n.Lhs) == 2 && len(n.Rhs) == 1 {
- f.walk(n.Rhs, "as2", visit)
+ f.walk(n.Rhs, ctxAssign2, visit)
} else {
- f.walk(n.Rhs, "expr", visit)
+ f.walk(n.Rhs, ctxExpr, visit)
}
case *ast.GoStmt:
- f.walk(n.Call, "expr", visit)
+ f.walk(n.Call, ctxExpr, visit)
case *ast.DeferStmt:
- f.walk(n.Call, "defer", visit)
+ f.walk(n.Call, ctxDefer, visit)
case *ast.ReturnStmt:
- f.walk(n.Results, "expr", visit)
+ f.walk(n.Results, ctxExpr, visit)
case *ast.BranchStmt:
case *ast.BlockStmt:
f.walk(n.List, context, visit)
case *ast.IfStmt:
- f.walk(n.Init, "stmt", visit)
- f.walk(&n.Cond, "expr", visit)
- f.walk(n.Body, "stmt", visit)
- f.walk(n.Else, "stmt", visit)
+ f.walk(n.Init, ctxStmt, visit)
+ f.walk(&n.Cond, ctxExpr, visit)
+ f.walk(n.Body, ctxStmt, visit)
+ f.walk(n.Else, ctxStmt, visit)
case *ast.CaseClause:
- if context == "typeswitch" {
- context = "type"
+ if context == ctxTypeSwitch {
+ context = ctxType
} else {
- context = "expr"
+ context = ctxExpr
}
f.walk(n.List, context, visit)
- f.walk(n.Body, "stmt", visit)
+ f.walk(n.Body, ctxStmt, visit)
case *ast.SwitchStmt:
- f.walk(n.Init, "stmt", visit)
- f.walk(&n.Tag, "expr", visit)
- f.walk(n.Body, "switch", visit)
+ f.walk(n.Init, ctxStmt, visit)
+ f.walk(&n.Tag, ctxExpr, visit)
+ f.walk(n.Body, ctxSwitch, visit)
case *ast.TypeSwitchStmt:
- f.walk(n.Init, "stmt", visit)
- f.walk(n.Assign, "stmt", visit)
- f.walk(n.Body, "typeswitch", visit)
+ f.walk(n.Init, ctxStmt, visit)
+ f.walk(n.Assign, ctxStmt, visit)
+ f.walk(n.Body, ctxTypeSwitch, visit)
case *ast.CommClause:
- f.walk(n.Comm, "stmt", visit)
- f.walk(n.Body, "stmt", visit)
+ f.walk(n.Comm, ctxStmt, visit)
+ f.walk(n.Body, ctxStmt, visit)
case *ast.SelectStmt:
- f.walk(n.Body, "stmt", visit)
+ f.walk(n.Body, ctxStmt, visit)
case *ast.ForStmt:
- f.walk(n.Init, "stmt", visit)
- f.walk(&n.Cond, "expr", visit)
- f.walk(n.Post, "stmt", visit)
- f.walk(n.Body, "stmt", visit)
+ f.walk(n.Init, ctxStmt, visit)
+ f.walk(&n.Cond, ctxExpr, visit)
+ f.walk(n.Post, ctxStmt, visit)
+ f.walk(n.Body, ctxStmt, visit)
case *ast.RangeStmt:
- f.walk(&n.Key, "expr", visit)
- f.walk(&n.Value, "expr", visit)
- f.walk(&n.X, "expr", visit)
- f.walk(n.Body, "stmt", visit)
+ f.walk(&n.Key, ctxExpr, visit)
+ f.walk(&n.Value, ctxExpr, visit)
+ f.walk(&n.X, ctxExpr, visit)
+ f.walk(n.Body, ctxStmt, visit)
case *ast.ImportSpec:
case *ast.ValueSpec:
- f.walk(&n.Type, "type", visit)
+ f.walk(&n.Type, ctxType, visit)
if len(n.Names) == 2 && len(n.Values) == 1 {
- f.walk(&n.Values[0], "as2", visit)
+ f.walk(&n.Values[0], ctxAssign2, visit)
} else {
- f.walk(n.Values, "expr", visit)
+ f.walk(n.Values, ctxExpr, visit)
}
case *ast.TypeSpec:
- f.walk(&n.Type, "type", visit)
+ f.walk(&n.Type, ctxType, visit)
case *ast.BadDecl:
case *ast.GenDecl:
- f.walk(n.Specs, "spec", visit)
+ f.walk(n.Specs, ctxSpec, visit)
case *ast.FuncDecl:
if n.Recv != nil {
- f.walk(n.Recv, "param", visit)
+ f.walk(n.Recv, ctxParam, visit)
}
- f.walk(n.Type, "type", visit)
+ f.walk(n.Type, ctxType, visit)
if n.Body != nil {
- f.walk(n.Body, "stmt", visit)
+ f.walk(n.Body, ctxStmt, visit)
}
case *ast.File:
- f.walk(n.Decls, "decl", visit)
+ f.walk(n.Decls, ctxDecl, visit)
case *ast.Package:
for _, file := range n.Files {
- f.walk(file, "file", visit)
+ f.walk(file, ctxFile, visit)
}
case []ast.Decl:
diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go
index b2388829a87..c1bdf0659fa 100644
--- a/src/cmd/cgo/doc.go
+++ b/src/cmd/cgo/doc.go
@@ -102,11 +102,13 @@ the use of cgo, and to 0 to disable it. The go tool will set the
build constraint "cgo" if cgo is enabled.
When cross-compiling, you must specify a C cross-compiler for cgo to
-use. You can do this by setting the CC_FOR_TARGET environment
-variable when building the toolchain using make.bash, or by setting
-the CC environment variable any time you run the go tool. The
-CXX_FOR_TARGET and CXX environment variables work in a similar way for
-C++ code.
+use. You can do this by setting the generic CC_FOR_TARGET or the
+more specific CC_FOR_${GOOS}_${GOARCH} (for example, CC_FOR_linux_arm)
+environment variable when building the toolchain using make.bash,
+or you can set the CC environment variable any time you run the go tool.
+
+The CXX_FOR_TARGET, CXX_FOR_${GOOS}_${GOARCH}, and CXX
+environment variables work in a similar way for C++ code.
Go references to C
@@ -126,12 +128,29 @@ C.complexfloat (complex float), and C.complexdouble (complex double).
The C type void* is represented by Go's unsafe.Pointer.
The C types __int128_t and __uint128_t are represented by [16]byte.
+A few special C types which would normally be represented by a pointer
+type in Go are instead represented by a uintptr. See the Special
+cases section below.
+
To access a struct, union, or enum type directly, prefix it with
struct_, union_, or enum_, as in C.struct_stat.
The size of any C type T is available as C.sizeof_T, as in
C.sizeof_struct_stat.
+A C function may be declared in the Go file with a parameter type of
+the special name _GoString_. This function may be called with an
+ordinary Go string value. The string length, and a pointer to the
+string contents, may be accessed by calling the C functions
+
+ size_t _GoStringLen(_GoString_ s);
+ const char *_GoStringPtr(_GoString_ s);
+
+These functions are only available in the preamble, not in other C
+files. The C code must not modify the contents of the pointer returned
+by _GoStringPtr. Note that the string contents may not have a trailing
+NUL byte.
+
As Go doesn't have support for C's union type in the general case,
C's union types are represented as a Go byte array with the same length.
@@ -241,7 +260,16 @@ They will be available in the C code as:
found in the _cgo_export.h generated header, after any preambles
copied from the cgo input files. Functions with multiple
return values are mapped to functions returning a struct.
+
Not all Go types can be mapped to C types in a useful way.
+Go struct types are not supported; use a C struct type.
+Go array types are not supported; use a C pointer.
+
+Go functions that take arguments of type string may be called with the
+C type _GoString_, described above. The _GoString_ type will be
+automatically defined in the preamble. Note that there is no way for C
+code to create a value of this type; this is only useful for passing
+string values from Go to C and back to Go.
Using //export in a file places a restriction on the preamble:
since it is copied into two different C output files, it must not
@@ -264,6 +292,14 @@ pointer is a Go pointer or a C pointer is a dynamic property
determined by how the memory was allocated; it has nothing to do with
the type of the pointer.
+Note that values of some Go types, other than the type's zero value,
+always include Go pointers. This is true of string, slice, interface,
+channel, map, and function types. A pointer type may hold a Go pointer
+or a C pointer. Array and struct types may or may not include Go
+pointers, depending on the element types. All the discussion below
+about Go pointers applies not just to pointer types, but also to other
+types that include Go pointers.
+
Go code may pass a Go pointer to C provided the Go memory to which it
points does not contain any Go pointers. The C code must preserve
this property: it must not store any Go pointers in Go memory, even
@@ -274,14 +310,17 @@ the Go memory in question is the entire array or the entire backing
array of the slice.
C code may not keep a copy of a Go pointer after the call returns.
+This includes the _GoString_ type, which, as noted above, includes a
+Go pointer; _GoString_ values may not be retained by C code.
-A Go function called by C code may not return a Go pointer. A Go
-function called by C code may take C pointers as arguments, and it may
-store non-pointer or C pointer data through those pointers, but it may
-not store a Go pointer in memory pointed to by a C pointer. A Go
-function called by C code may take a Go pointer as an argument, but it
-must preserve the property that the Go memory to which it points does
-not contain any Go pointers.
+A Go function called by C code may not return a Go pointer (which
+implies that it may not return a string, slice, channel, and so
+forth). A Go function called by C code may take C pointers as
+arguments, and it may store non-pointer or C pointer data through
+those pointers, but it may not store a Go pointer in memory pointed to
+by a C pointer. A Go function called by C code may take a Go pointer
+as an argument, but it must preserve the property that the Go memory
+to which it points does not contain any Go pointers.
Go code may not store a Go pointer in C memory. C code may store Go
pointers in C memory, subject to the rule above: it must stop storing
@@ -299,6 +338,84 @@ and of course there is nothing stopping the C code from doing anything
it likes. However, programs that break these rules are likely to fail
in unexpected and unpredictable ways.
+Special cases
+
+A few special C types which would normally be represented by a pointer
+type in Go are instead represented by a uintptr. Those types are
+the CF*Ref types from the CoreFoundation library on Darwin, including:
+
+ CFAllocatorRef
+ CFArrayRef
+ CFAttributedStringRef
+ CFBagRef
+ CFBinaryHeapRef
+ CFBitVectorRef
+ CFBooleanRef
+ CFBundleRef
+ CFCalendarRef
+ CFCharacterSetRef
+ CFDataRef
+ CFDateFormatterRef
+ CFDateRef
+ CFDictionaryRef
+ CFErrorRef
+ CFFileDescriptorRef
+ CFFileSecurityRef
+ CFLocaleRef
+ CFMachPortRef
+ CFMessagePortRef
+ CFMutableArrayRef
+ CFMutableAttributedStringRef
+ CFMutableBagRef
+ CFMutableBitVectorRef
+ CFMutableCharacterSetRef
+ CFMutableDataRef
+ CFMutableDictionaryRef
+ CFMutableSetRef
+ CFMutableStringRef
+ CFNotificationCenterRef
+ CFNullRef
+ CFNumberFormatterRef
+ CFNumberRef
+ CFPlugInInstanceRef
+ CFPlugInRef
+ CFPropertyListRef
+ CFReadStreamRef
+ CFRunLoopObserverRef
+ CFRunLoopRef
+ CFRunLoopSourceRef
+ CFRunLoopTimerRef
+ CFSetRef
+ CFSocketRef
+ CFStringRef
+ CFStringTokenizerRef
+ CFTimeZoneRef
+ CFTreeRef
+ CFTypeRef
+ CFURLCreateFromFSRef
+ CFURLEnumeratorRef
+ CFURLGetFSRef
+ CFURLRef
+ CFUUIDRef
+ CFUserNotificationRef
+ CFWriteStreamRef
+ CFXMLNodeRef
+ CFXMLParserRef
+ CFXMLTreeRef
+
+These types are uintptr on the Go side because they would otherwise
+confuse the Go garbage collector; they are sometimes not really
+pointers but data structures encoded in a pointer type. All operations
+on these types must happen in C. The proper constant to initialize an
+empty such reference is 0, not nil.
+
+This special case was introduced in Go 1.10. For auto-updating code
+from Go 1.9 and earlier, use the cftype rewrite in the Go fix tool:
+
+ go tool fix -r cftype
+
+It will replace nil with 0 in the appropriate places.
+
Using cgo directly
Usage:
@@ -312,32 +429,35 @@ invoking the C compiler to compile the C parts of the package.
The following options are available when running cgo directly:
+ -V
+ Print cgo version and exit.
+ -debug-define
+ Debugging option. Print #defines.
+ -debug-gcc
+ Debugging option. Trace C compiler execution and output.
-dynimport file
Write list of symbols imported by file. Write to
-dynout argument or to standard output. Used by go
build when building a cgo package.
+ -dynlinker
+ Write dynamic linker as part of -dynimport output.
-dynout file
Write -dynimport output to file.
-dynpackage package
Set Go package for -dynimport output.
- -dynlinker
- Write dynamic linker as part of -dynimport output.
- -godefs
- Write out input file in Go syntax replacing C package
- names with real values. Used to generate files in the
- syscall package when bootstrapping a new target.
- -srcdir directory
- Find the Go input files, listed on the command line,
- in directory.
- -objdir directory
- Put all generated files in directory.
- -importpath string
- The import path for the Go package. Optional; used for
- nicer comments in the generated files.
-exportheader file
If there are any exported functions, write the
generated export declarations to file.
C code can #include this to see the declarations.
+ -importpath string
+ The import path for the Go package. Optional; used for
+ nicer comments in the generated files.
+ -import_runtime_cgo
+ If set (which it is by default) import runtime/cgo in
+ generated output.
+ -import_syscall
+ If set (which it is by default) import syscall in
+ generated output.
-gccgo
Generate output for the gccgo compiler rather than the
gc compiler.
@@ -345,16 +465,13 @@ The following options are available when running cgo directly:
The -fgo-prefix option to be used with gccgo.
-gccgopkgpath path
The -fgo-pkgpath option to be used with gccgo.
- -import_runtime_cgo
- If set (which it is by default) import runtime/cgo in
- generated output.
- -import_syscall
- If set (which it is by default) import syscall in
- generated output.
- -debug-define
- Debugging option. Print #defines.
- -debug-gcc
- Debugging option. Trace C compiler execution and output.
+ -godefs
+ Write out input file in Go syntax replacing C package
+ names with real values. Used to generate files in the
+ syscall package when bootstrapping a new target.
+ -objdir directory
+ Put all generated files in directory.
+ -srcdir directory
*/
package main
@@ -403,21 +520,19 @@ about simple #defines for constants and the like. These are recorded
for later use.
Next, cgo needs to identify the kinds for each identifier. For the
-identifiers C.foo and C.bar, cgo generates this C program:
+identifiers C.foo, cgo generates this C program:
#line 1 "not-declared"
- void __cgo_f_xxx_1(void) { __typeof__(foo) *__cgo_undefined__; }
+ void __cgo_f_1_1(void) { __typeof__(foo) *__cgo_undefined__1; }
#line 1 "not-type"
- void __cgo_f_xxx_2(void) { foo *__cgo_undefined__; }
- #line 1 "not-const"
- void __cgo_f_xxx_3(void) { enum { __cgo_undefined__ = (foo)*1 }; }
- #line 2 "not-declared"
- void __cgo_f_xxx_1(void) { __typeof__(bar) *__cgo_undefined__; }
- #line 2 "not-type"
- void __cgo_f_xxx_2(void) { bar *__cgo_undefined__; }
- #line 2 "not-const"
- void __cgo_f_xxx_3(void) { enum { __cgo_undefined__ = (bar)*1 }; }
+ void __cgo_f_1_2(void) { foo *__cgo_undefined__2; }
+ #line 1 "not-int-const"
+ void __cgo_f_1_3(void) { enum { __cgo_undefined__3 = (foo)*1 }; }
+ #line 1 "not-num-const"
+ void __cgo_f_1_4(void) { static const double __cgo_undefined__4 = (foo); }
+ #line 1 "not-str-lit"
+ void __cgo_f_1_5(void) { static const char __cgo_undefined__5[] = (foo); }
This program will not compile, but cgo can use the presence or absence
of an error message on a given line to deduce the information it
@@ -427,45 +542,72 @@ errors that might stop parsing early.
An error on not-declared:1 indicates that foo is undeclared.
An error on not-type:1 indicates that foo is not a type (if declared at all, it is an identifier).
-An error on not-const:1 indicates that foo is not an integer constant.
+An error on not-int-const:1 indicates that foo is not an integer constant.
+An error on not-num-const:1 indicates that foo is not a number constant.
+An error on not-str-lit:1 indicates that foo is not a string literal.
+An error on not-signed-int-const:1 indicates that foo is not a signed integer constant.
-The line number specifies the name involved. In the example, 1 is foo and 2 is bar.
+The line number specifies the name involved. In the example, 1 is foo.
Next, cgo must learn the details of each type, variable, function, or
constant. It can do this by reading object files. If cgo has decided
-that t1 is a type, v2 and v3 are variables or functions, and c4, c5,
-and c6 are constants, it generates:
+that t1 is a type, v2 and v3 are variables or functions, and i4, i5
+are integer constants, u6 is an unsigned integer constant, and f7 and f8
+are float constants, and s9 and s10 are string constants, it generates:
__typeof__(t1) *__cgo__1;
__typeof__(v2) *__cgo__2;
__typeof__(v3) *__cgo__3;
- __typeof__(c4) *__cgo__4;
- enum { __cgo_enum__4 = c4 };
- __typeof__(c5) *__cgo__5;
- enum { __cgo_enum__5 = c5 };
- __typeof__(c6) *__cgo__6;
- enum { __cgo_enum__6 = c6 };
+ __typeof__(i4) *__cgo__4;
+ enum { __cgo_enum__4 = i4 };
+ __typeof__(i5) *__cgo__5;
+ enum { __cgo_enum__5 = i5 };
+ __typeof__(u6) *__cgo__6;
+ enum { __cgo_enum__6 = u6 };
+ __typeof__(f7) *__cgo__7;
+ __typeof__(f8) *__cgo__8;
+ __typeof__(s9) *__cgo__9;
+ __typeof__(s10) *__cgo__10;
- long long __cgo_debug_data[] = {
+ long long __cgodebug_ints[] = {
0, // t1
0, // v2
0, // v3
- c4,
- c5,
- c6,
+ i4,
+ i5,
+ u6,
+ 0, // f7
+ 0, // f8
+ 0, // s9
+ 0, // s10
1
};
+ double __cgodebug_floats[] = {
+ 0, // t1
+ 0, // v2
+ 0, // v3
+ 0, // i4
+ 0, // i5
+ 0, // u6
+ f7,
+ f8,
+ 0, // s9
+ 0, // s10
+ 1
+ };
+
+ const char __cgodebug_str__9[] = s9;
+ const unsigned long long __cgodebug_strlen__9 = sizeof(s9)-1;
+ const char __cgodebug_str__10[] = s10;
+ const unsigned long long __cgodebug_strlen__10 = sizeof(s10)-1;
+
and again invokes the system C compiler, to produce an object file
containing debug information. Cgo parses the DWARF debug information
for __cgo__N to learn the type of each identifier. (The types also
-distinguish functions from global variables.) If using a standard gcc,
-cgo can parse the DWARF debug information for the __cgo_enum__N to
-learn the identifier's value. The LLVM-based gcc on OS X emits
-incomplete DWARF information for enums; in that case cgo reads the
-constant values from the __cgo_debug_data from the object file's data
-segment.
+distinguish functions from global variables.) Cgo reads the constant
+values from the __cgodebug_* symbols in the object file's data segment.
At this point cgo knows the meaning of each C.xxx well enough to start
the translation process.
@@ -550,9 +692,12 @@ _cgo_main.c:
int main() { return 0; }
void crosscall2(void(*fn)(void*, int, uintptr_t), void *a, int c, uintptr_t ctxt) { }
- uintptr_t _cgo_wait_runtime_init_done() { }
+ uintptr_t _cgo_wait_runtime_init_done() { return 0; }
+ void _cgo_release_context(uintptr_t ctxt) { }
+ char* _cgo_topofstack(void) { return (char*)0; }
void _cgo_allocate(void *a, int c) { }
void _cgo_panic(void *a, int c) { }
+ void _cgo_reginit(void) { }
The extra functions here are stubs to satisfy the references in the C
code generated for gcc. The build process links this stub, along with
diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go
index c104067a93c..5cd6ac953c3 100644
--- a/src/cmd/cgo/gcc.go
+++ b/src/cmd/cgo/gcc.go
@@ -169,21 +169,8 @@ func (p *Package) Translate(f *File) {
p.loadDWARF(f, needType)
}
if p.rewriteCalls(f) {
- // Add `import _cgo_unsafe "unsafe"` as the first decl
- // after the package statement.
- imp := &ast.GenDecl{
- Tok: token.IMPORT,
- Specs: []ast.Spec{
- &ast.ImportSpec{
- Name: ast.NewIdent("_cgo_unsafe"),
- Path: &ast.BasicLit{
- Kind: token.STRING,
- Value: `"unsafe"`,
- },
- },
- },
- }
- f.AST.Decls = append([]ast.Decl{imp}, f.AST.Decls...)
+ // Add `import _cgo_unsafe "unsafe"` after the package statement.
+ f.Edit.Insert(f.offset(f.AST.Name.End()), "; import _cgo_unsafe \"unsafe\"")
}
p.rewriteRef(f)
}
@@ -192,8 +179,8 @@ func (p *Package) Translate(f *File) {
// in the file f and saves relevant renamings in f.Name[name].Define.
func (p *Package) loadDefines(f *File) {
var b bytes.Buffer
- b.WriteString(f.Preamble)
b.WriteString(builtinProlog)
+ b.WriteString(f.Preamble)
stdout := p.gccDefines(b.Bytes())
for _, line := range strings.Split(stdout, "\n") {
@@ -264,10 +251,6 @@ func (p *Package) guessKinds(f *File) []*Name {
if n.IsConst() {
continue
}
-
- if isName(n.Define) {
- n.C = n.Define
- }
}
// If this is a struct, union, or enum type name, no need to guess the kind.
@@ -316,8 +299,8 @@ func (p *Package) guessKinds(f *File) []*Name {
// whether name denotes a type or an expression.
var b bytes.Buffer
- b.WriteString(f.Preamble)
b.WriteString(builtinProlog)
+ b.WriteString(f.Preamble)
for i, n := range names {
fmt.Fprintf(&b, "#line %d \"not-declared\"\n"+
@@ -423,14 +406,7 @@ func (p *Package) guessKinds(f *File) []*Name {
for i, n := range names {
switch sniff[i] {
default:
- var tpos token.Pos
- for _, ref := range f.Ref {
- if ref.Name == n {
- tpos = ref.Pos()
- break
- }
- }
- error_(tpos, "could not determine kind of name for C.%s", fixGo(n.Go))
+ error_(f.NamePos[n], "could not determine kind of name for C.%s", fixGo(n.Go))
case notStrLiteral | notType:
n.Kind = "iconst"
case notIntConst | notStrLiteral | notType:
@@ -472,8 +448,8 @@ func (p *Package) loadDWARF(f *File, names []*Name) {
// for each entry in names and then dereference the type we
// learn for __cgo__i.
var b bytes.Buffer
- b.WriteString(f.Preamble)
b.WriteString(builtinProlog)
+ b.WriteString(f.Preamble)
b.WriteString("#line 1 \"cgo-dwarf-inference\"\n")
for i, n := range names {
fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i)
@@ -524,14 +500,6 @@ func (p *Package) loadDWARF(f *File, names []*Name) {
// Scan DWARF info for top-level TagVariable entries with AttrName __cgo__i.
types := make([]dwarf.Type, len(names))
- nameToIndex := make(map[*Name]int)
- for i, n := range names {
- nameToIndex[n] = i
- }
- nameToRef := make(map[*Name]*Ref)
- for _, ref := range f.Ref {
- nameToRef[ref.Name] = ref
- }
r := d.Reader()
for {
e, err := r.Next()
@@ -582,10 +550,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) {
if types[i] == nil {
continue
}
- pos := token.NoPos
- if ref, ok := nameToRef[n]; ok {
- pos = ref.Pos()
- }
+ pos := f.NamePos[n]
f, fok := types[i].(*dwarf.FuncType)
if n.Kind != "type" && fok {
n.Kind = "func"
@@ -740,8 +705,9 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool {
stmts = append(stmts, stmt)
}
+ const cgoMarker = "__cgo__###__marker__"
fcall := &ast.CallExpr{
- Fun: call.Call.Fun,
+ Fun: ast.NewIdent(cgoMarker),
Args: nargs,
}
ftype := &ast.FuncType{
@@ -763,31 +729,26 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool {
}
}
- // There is a Ref pointing to the old call.Call.Fun.
+ // If this call expects two results, we have to
+ // adjust the results of the function we generated.
for _, ref := range f.Ref {
- if ref.Expr == &call.Call.Fun {
- ref.Expr = &fcall.Fun
-
- // If this call expects two results, we have to
- // adjust the results of the function we generated.
- if ref.Context == "call2" {
- if ftype.Results == nil {
- // An explicit void argument
- // looks odd but it seems to
- // be how cgo has worked historically.
- ftype.Results = &ast.FieldList{
- List: []*ast.Field{
- &ast.Field{
- Type: ast.NewIdent("_Ctype_void"),
- },
+ if ref.Expr == &call.Call.Fun && ref.Context == ctxCall2 {
+ if ftype.Results == nil {
+ // An explicit void argument
+ // looks odd but it seems to
+ // be how cgo has worked historically.
+ ftype.Results = &ast.FieldList{
+ List: []*ast.Field{
+ &ast.Field{
+ Type: ast.NewIdent("_Ctype_void"),
},
- }
+ },
}
- ftype.Results.List = append(ftype.Results.List,
- &ast.Field{
- Type: ast.NewIdent("error"),
- })
}
+ ftype.Results.List = append(ftype.Results.List,
+ &ast.Field{
+ Type: ast.NewIdent("error"),
+ })
}
}
@@ -801,14 +762,16 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool {
Results: []ast.Expr{fcall},
}
}
- call.Call.Fun = &ast.FuncLit{
+ lit := &ast.FuncLit{
Type: ftype,
Body: &ast.BlockStmt{
List: append(stmts, fbody),
},
}
- call.Call.Lparen = token.NoPos
- call.Call.Rparen = token.NoPos
+ text := strings.Replace(gofmt(lit), "\n", ";", -1)
+ repl := strings.Split(text, cgoMarker)
+ f.Edit.Insert(f.offset(call.Call.Fun.Pos()), repl[0])
+ f.Edit.Insert(f.offset(call.Call.Fun.End()), repl[1])
return needsUnsafe
}
@@ -962,8 +925,8 @@ func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr
// effect is a function call.
func (p *Package) hasSideEffects(f *File, x ast.Expr) bool {
found := false
- f.walk(x, "expr",
- func(f *File, x interface{}, context string) {
+ f.walk(x, ctxExpr,
+ func(f *File, x interface{}, context astContext) {
switch x.(type) {
case *ast.CallExpr:
found = true
@@ -1072,7 +1035,17 @@ func (p *Package) rewriteRef(f *File) {
// Assign mangled names.
for _, n := range f.Name {
if n.Kind == "not-type" {
- n.Kind = "var"
+ if n.Define == "" {
+ n.Kind = "var"
+ } else {
+ n.Kind = "macro"
+ n.FuncType = &FuncType{
+ Result: n.Type,
+ Go: &ast.FuncType{
+ Results: &ast.FieldList{List: []*ast.Field{{Type: n.Type.Go}}},
+ },
+ }
+ }
}
if n.Mangle == "" {
p.mangleName(n)
@@ -1092,10 +1065,10 @@ func (p *Package) rewriteRef(f *File) {
}
var expr ast.Expr = ast.NewIdent(r.Name.Mangle) // default
switch r.Context {
- case "call", "call2":
+ case ctxCall, ctxCall2:
if r.Name.Kind != "func" {
if r.Name.Kind == "type" {
- r.Context = "type"
+ r.Context = ctxType
if r.Name.Type == nil {
error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
break
@@ -1107,7 +1080,7 @@ func (p *Package) rewriteRef(f *File) {
break
}
functions[r.Name.Go] = true
- if r.Context == "call2" {
+ if r.Context == ctxCall2 {
if r.Name.Go == "_CMalloc" {
error_(r.Pos(), "no two-result form for C.malloc")
break
@@ -1125,8 +1098,9 @@ func (p *Package) rewriteRef(f *File) {
r.Name = n
break
}
- case "expr":
- if r.Name.Kind == "func" {
+ case ctxExpr:
+ switch r.Name.Kind {
+ case "func":
if builtinDefs[r.Name.C] != "" {
error_(r.Pos(), "use of builtin '%s' not in function call", fixGo(r.Name.C))
}
@@ -1153,25 +1127,25 @@ func (p *Package) rewriteRef(f *File) {
Fun: &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"},
Args: []ast.Expr{ast.NewIdent(name.Mangle)},
}
- } else if r.Name.Kind == "type" {
+ case "type":
// Okay - might be new(T)
if r.Name.Type == nil {
error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
break
}
expr = r.Name.Type.Go
- } else if r.Name.Kind == "var" {
+ case "var":
expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
+ case "macro":
+ expr = &ast.CallExpr{Fun: expr}
}
-
- case "selector":
+ case ctxSelector:
if r.Name.Kind == "var" {
expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
} else {
error_(r.Pos(), "only C variables allowed in selector expression %s", fixGo(r.Name.Go))
}
-
- case "type":
+ case ctxType:
if r.Name.Kind != "type" {
error_(r.Pos(), "expression C.%s used as type", fixGo(r.Name.Go))
} else if r.Name.Type == nil {
@@ -1186,6 +1160,7 @@ func (p *Package) rewriteRef(f *File) {
error_(r.Pos(), "must call C.%s", fixGo(r.Name.Go))
}
}
+
if *godefs {
// Substitute definition for mangled type name.
if id, ok := expr.(*ast.Ident); ok {
@@ -1207,7 +1182,17 @@ func (p *Package) rewriteRef(f *File) {
expr = &ast.Ident{NamePos: pos, Name: x.Name}
}
+ // Change AST, because some later processing depends on it,
+ // and also because -godefs mode still prints the AST.
+ old := *r.Expr
*r.Expr = expr
+
+ // Record source-level edit for cgo output.
+ repl := gofmt(expr)
+ if r.Name.Kind != "type" {
+ repl = "(" + repl + ")"
+ }
+ f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), repl)
}
// Remove functions only used as expressions, so their respective
@@ -1232,7 +1217,7 @@ func (p *Package) gccBaseCmd() []string {
if ret := strings.Fields(os.Getenv("GCC")); len(ret) > 0 {
return ret
}
- return strings.Fields(defaultCC)
+ return strings.Fields(defaultCC(goos, goarch))
}
// gccMachine returns the gcc -m flag to use, either "-m32", "-m64" or "-marm".
@@ -2072,6 +2057,12 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
name := c.Ident("_Ctype_" + dt.Name)
goIdent[name.Name] = name
sub := c.Type(dt.Type, pos)
+ if badPointerTypedef(dt.Name) {
+ // Treat this typedef as a uintptr.
+ s := *sub
+ s.Go = c.uintptr
+ sub = &s
+ }
t.Go = name
if unionWithPointer[sub.Go] {
unionWithPointer[t.Go] = true
@@ -2152,7 +2143,7 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
if ss, ok := dwarfToName[s]; ok {
s = ss
}
- s = strings.Join(strings.Split(s, " "), "") // strip spaces
+ s = strings.Replace(s, " ", "", -1)
name := c.Ident("_Ctype_" + s)
tt := *t
typedef[name.Name] = &tt
@@ -2230,6 +2221,17 @@ func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type {
if _, void := base(ptr.Type).(*dwarf.VoidType); void {
break
}
+ // ...or the typedef is one in which we expect bad pointers.
+ // It will be a uintptr instead of *X.
+ if badPointerTypedef(dt.Name) {
+ break
+ }
+
+ // If we already know the typedef for t just use that.
+ // See issue 19832.
+ if def := typedef[t.Go.(*ast.Ident).Name]; def != nil {
+ break
+ }
t = c.Type(ptr, pos)
if t == nil {
@@ -2386,7 +2388,9 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct
size := t.Size
talign := t.Align
if f.BitSize > 0 {
- if f.BitSize%8 != 0 {
+ switch f.BitSize {
+ case 8, 16, 32, 64:
+ default:
continue
}
size = f.BitSize / 8
@@ -2562,3 +2566,51 @@ func fieldPrefix(fld []*ast.Field) string {
}
return prefix
}
+
+// badPointerTypedef reports whether t is a C typedef that should not be considered a pointer in Go.
+// A typedef is bad if C code sometimes stores non-pointers in this type.
+// TODO: Currently our best solution is to find these manually and list them as
+// they come up. A better solution is desired.
+func badPointerTypedef(t string) bool {
+ // The real bad types are CFNumberRef and CFTypeRef.
+ // Sometimes non-pointers are stored in these types.
+ // CFTypeRef is a supertype of those, so it can have bad pointers in it as well.
+ // We return true for the other CF*Ref types just so casting between them is easier.
+ // See comment below for details about the bad pointers.
+ return goos == "darwin" && strings.HasPrefix(t, "CF") && strings.HasSuffix(t, "Ref")
+}
+
+// Comment from Darwin's CFInternal.h
+/*
+// Tagged pointer support
+// Low-bit set means tagged object, next 3 bits (currently)
+// define the tagged object class, next 4 bits are for type
+// information for the specific tagged object class. Thus,
+// the low byte is for type info, and the rest of a pointer
+// (32 or 64-bit) is for payload, whatever the tagged class.
+//
+// Note that the specific integers used to identify the
+// specific tagged classes can and will change from release
+// to release (that's why this stuff is in CF*Internal*.h),
+// as can the definition of type info vs payload above.
+//
+#if __LP64__
+#define CF_IS_TAGGED_OBJ(PTR) ((uintptr_t)(PTR) & 0x1)
+#define CF_TAGGED_OBJ_TYPE(PTR) ((uintptr_t)(PTR) & 0xF)
+#else
+#define CF_IS_TAGGED_OBJ(PTR) 0
+#define CF_TAGGED_OBJ_TYPE(PTR) 0
+#endif
+
+enum {
+ kCFTaggedObjectID_Invalid = 0,
+ kCFTaggedObjectID_Atom = (0 << 1) + 1,
+ kCFTaggedObjectID_Undefined3 = (1 << 1) + 1,
+ kCFTaggedObjectID_Undefined2 = (2 << 1) + 1,
+ kCFTaggedObjectID_Integer = (3 << 1) + 1,
+ kCFTaggedObjectID_DateTS = (4 << 1) + 1,
+ kCFTaggedObjectID_ManagedObjectID = (5 << 1) + 1, // Core Data
+ kCFTaggedObjectID_Date = (6 << 1) + 1,
+ kCFTaggedObjectID_Undefined7 = (7 << 1) + 1,
+};
+*/
diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go
index 3ad13ef9c73..0c1c863a7a0 100644
--- a/src/cmd/cgo/main.go
+++ b/src/cmd/cgo/main.go
@@ -24,6 +24,9 @@ import (
"runtime"
"sort"
"strings"
+
+ "cmd/internal/edit"
+ "cmd/internal/objabi"
)
// A Package collects information about the package we're going to write.
@@ -54,6 +57,12 @@ type File struct {
Calls []*Call // all calls to C.xxx in AST
ExpFunc []*ExpFunc // exported functions for this file
Name map[string]*Name // map from Go name to Name
+ NamePos map[*Name]token.Pos // map from Name to position of the first reference
+ Edit *edit.Buffer
+}
+
+func (f *File) offset(p token.Pos) int {
+ return fset.Position(p).Offset
}
func nameKeys(m map[string]*Name) []string {
@@ -75,7 +84,7 @@ type Call struct {
type Ref struct {
Name *Name
Expr *ast.Expr
- Context string // "type", "expr", "call", or "call2"
+ Context astContext
}
func (r *Ref) Pos() token.Pos {
@@ -88,7 +97,7 @@ type Name struct {
Mangle string // name used in generated Go
C string // name used in C
Define string // #define expansion
- Kind string // "iconst", "fconst", "sconst", "type", "var", "fpvar", "func", "not-type"
+ Kind string // "iconst", "fconst", "sconst", "type", "var", "fpvar", "func", "macro", "not-type"
Type *Type // the type of xxx
FuncType *FuncType
AddError bool
@@ -105,7 +114,7 @@ func (n *Name) IsConst() bool {
return strings.HasSuffix(n.Kind, "const")
}
-// A ExpFunc is an exported function, callable from C.
+// An ExpFunc is an exported function, callable from C.
// Such functions are identified in the Go input file
// by doc comments containing the line //export ExpName
type ExpFunc struct {
@@ -200,6 +209,7 @@ var importSyscall = flag.Bool("import_syscall", true, "import syscall in generat
var goarch, goos string
func main() {
+ objabi.AddVersionFlag() // -V
flag.Usage = usage
flag.Parse()
@@ -280,6 +290,7 @@ func main() {
}
f := new(File)
+ f.Edit = edit.NewBuffer(b)
f.ParseGo(input, b)
f.DiscardCgoDirectives()
fs[i] = f
@@ -300,11 +311,13 @@ func main() {
p.Translate(f)
for _, cref := range f.Ref {
switch cref.Context {
- case "call", "call2":
+ case ctxCall, ctxCall2:
if cref.Name.Kind != "type" {
break
}
+ old := *cref.Expr
*cref.Expr = cref.Name.Type.Go
+ f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), gofmt(cref.Name.Type.Go))
}
}
if nerrors > 0 {
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index 9ab6bd8f977..8834c3db5a1 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -15,6 +15,7 @@ import (
"go/token"
"io"
"os"
+ "path/filepath"
"sort"
"strings"
)
@@ -110,7 +111,13 @@ func (p *Package) writeDefs() {
// Which is not useful. Moreover we never override source info,
// so subsequent source code uses the same source info.
// Moreover, empty file name makes compile emit no source debug info at all.
- noSourceConf.Fprint(fgo2, fset, def.Go)
+ var buf bytes.Buffer
+ noSourceConf.Fprint(&buf, fset, def.Go)
+ if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) {
+ // This typedef is of the form `typedef a b` and should be an alias.
+ fmt.Fprintf(fgo2, "= ")
+ }
+ fmt.Fprintf(fgo2, "%s", buf.Bytes())
fmt.Fprintf(fgo2, "\n\n")
}
if *gccgo {
@@ -400,10 +407,12 @@ func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) {
inProlog := builtinDefs[name] != ""
cname := fmt.Sprintf("_cgo%s%s", cPrefix, n.Mangle)
paramnames := []string(nil)
- for i, param := range d.Type.Params.List {
- paramName := fmt.Sprintf("p%d", i)
- param.Names = []*ast.Ident{ast.NewIdent(paramName)}
- paramnames = append(paramnames, paramName)
+ if d.Type.Params != nil {
+ for i, param := range d.Type.Params.List {
+ paramName := fmt.Sprintf("p%d", i)
+ param.Names = []*ast.Ident{ast.NewIdent(paramName)}
+ paramnames = append(paramnames, paramName)
+ }
}
if *gccgo {
@@ -502,8 +511,10 @@ func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) {
fmt.Fprintf(fgo2, "\tif errno != 0 { r2 = syscall.Errno(errno) }\n")
}
fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n")
- for i := range d.Type.Params.List {
- fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i)
+ if d.Type.Params != nil {
+ for i := range d.Type.Params.List {
+ fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i)
+ }
}
fmt.Fprintf(fgo2, "\t}\n")
fmt.Fprintf(fgo2, "\treturn\n")
@@ -516,7 +527,7 @@ func (p *Package) writeOutput(f *File, srcfile string) {
if strings.HasSuffix(base, ".go") {
base = base[0 : len(base)-3]
}
- base = strings.Map(slashToUnderscore, base)
+ base = filepath.Base(base)
fgo1 := creat(*objDir + base + ".cgo1.go")
fgcc := creat(*objDir + base + ".cgo2.c")
@@ -525,10 +536,12 @@ func (p *Package) writeOutput(f *File, srcfile string) {
// Write Go output: Go input with rewrites of C.xxx to _C_xxx.
fmt.Fprintf(fgo1, "// Created by cgo - DO NOT EDIT\n\n")
- conf.Fprint(fgo1, fset, f.AST)
+ fmt.Fprintf(fgo1, "//line %s:1\n", srcfile)
+ fgo1.Write(f.Edit.Bytes())
// While we process the vars and funcs, also write gcc output.
// Gcc output starts with the preamble.
+ fmt.Fprintf(fgcc, "%s\n", builtinProlog)
fmt.Fprintf(fgcc, "%s\n", f.Preamble)
fmt.Fprintf(fgcc, "%s\n", gccProlog)
fmt.Fprintf(fgcc, "%s\n", tsanProlog)
@@ -615,14 +628,18 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
fmt.Fprint(fgcc, "(__typeof__(a->r)) ")
}
}
- fmt.Fprintf(fgcc, "%s(", n.C)
- for i := range n.FuncType.Params {
- if i > 0 {
- fmt.Fprintf(fgcc, ", ")
+ if n.Kind == "macro" {
+ fmt.Fprintf(fgcc, "%s;\n", n.C)
+ } else {
+ fmt.Fprintf(fgcc, "%s(", n.C)
+ for i := range n.FuncType.Params {
+ if i > 0 {
+ fmt.Fprintf(fgcc, ", ")
+ }
+ fmt.Fprintf(fgcc, "a->p%d", i)
}
- fmt.Fprintf(fgcc, "a->p%d", i)
+ fmt.Fprintf(fgcc, ");\n")
}
- fmt.Fprintf(fgcc, ");\n")
if n.AddError {
fmt.Fprintf(fgcc, "\t_cgo_errno = errno;\n")
}
@@ -985,7 +1002,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
default:
// Declare a result struct.
fmt.Fprintf(fgcch, "\n/* Return type for %s */\n", exp.ExpName)
- fmt.Fprintf(fgcch, "struct %s_result {\n", exp.ExpName)
+ fmt.Fprintf(fgcch, "struct %s_return {\n", exp.ExpName)
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
@@ -996,7 +1013,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprint(fgcch, "\n")
})
fmt.Fprintf(fgcch, "};\n")
- fmt.Fprintf(cdeclBuf, "struct %s_result", exp.ExpName)
+ fmt.Fprintf(cdeclBuf, "struct %s_return", exp.ExpName)
}
cRet := cdeclBuf.String()
@@ -1022,7 +1039,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprintf(fgcch, "\n%s", exp.Doc)
}
- fmt.Fprintf(fgcch, "extern %s %s %s;\n", cRet, exp.ExpName, cParams)
+ fmt.Fprintf(fgcch, "extern %s %s%s;\n", cRet, exp.ExpName, cParams)
// We need to use a name that will be exported by the
// Go code; otherwise gccgo will make it static and we
@@ -1131,6 +1148,7 @@ func (p *Package) writeExportHeader(fgcch io.Writer) {
pkg = p.PackagePath
}
fmt.Fprintf(fgcch, "/* package %s */\n\n", pkg)
+ fmt.Fprintf(fgcch, "%s\n", builtinExportProlog)
fmt.Fprintf(fgcch, "/* Start of preamble from import \"C\" comments. */\n\n")
fmt.Fprintf(fgcch, "%s\n", p.Preamble)
@@ -1223,8 +1241,9 @@ func (p *Package) cgoType(e ast.Expr) *Type {
// Slice: pointer, len, cap.
return &Type{Size: p.PtrSize * 3, Align: p.PtrSize, C: c("GoSlice")}
}
+ // Non-slice array types are not supported.
case *ast.StructType:
- // TODO
+ // Not supported.
case *ast.FuncType:
return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*")}
case *ast.InterfaceType:
@@ -1374,7 +1393,7 @@ const builtinProlog = `
/* Define intgo when compiling with GCC. */
typedef ptrdiff_t intgo;
-typedef struct { char *p; intgo n; } _GoString_;
+typedef struct { const char *p; intgo n; } _GoString_;
typedef struct { char *p; intgo n; intgo c; } _GoBytes_;
_GoString_ GoString(char *p);
_GoString_ GoStringN(char *p, int l);
@@ -1382,6 +1401,12 @@ _GoBytes_ GoBytes(void *p, int n);
char *CString(_GoString_);
void *CBytes(_GoBytes_);
void *_CMalloc(size_t);
+
+__attribute__ ((unused))
+static size_t _GoStringLen(_GoString_ s) { return s.n; }
+
+__attribute__ ((unused))
+static const char *_GoStringPtr(_GoString_ s) { return s.p; }
`
const goProlog = `
@@ -1613,6 +1638,27 @@ void localCgoCheckResult(Eface val) {
}
`
+// builtinExportProlog is a shorter version of builtinProlog,
+// to be put into the _cgo_export.h file.
+// For historical reasons we can't use builtinProlog in _cgo_export.h,
+// because _cgo_export.h defines GoString as a struct while builtinProlog
+// defines it as a function. We don't change this to avoid unnecessarily
+// breaking existing code.
+const builtinExportProlog = `
+#line 1 "cgo-builtin-prolog"
+
+#include /* for ptrdiff_t below */
+
+#ifndef GO_CGO_EXPORT_PROLOGUE_H
+#define GO_CGO_EXPORT_PROLOGUE_H
+
+typedef ptrdiff_t intgo;
+
+typedef struct { const char *p; intgo n; } _GoString_;
+
+#endif
+`
+
func (p *Package) gccExportHeaderProlog() string {
return strings.Replace(gccExportHeaderProlog, "GOINTBITS", fmt.Sprint(8*p.IntSize), -1)
}
@@ -1646,7 +1692,7 @@ typedef double _Complex GoComplex128;
*/
typedef char _check_for_GOINTBITS_bit_pointer_matching_GoInt[sizeof(void*)==GOINTBITS/8 ? 1:-1];
-typedef struct { const char *p; GoInt n; } GoString;
+typedef _GoString_ GoString;
typedef void *GoMap;
typedef void *GoChan;
typedef struct { void *t; void *v; } GoInterface;
diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go
index 59de326a919..3f329dea9cc 100644
--- a/src/cmd/compile/fmt_test.go
+++ b/src/cmd/compile/fmt_test.go
@@ -229,7 +229,7 @@ func TestFormats(t *testing.T) {
}
}
if mismatch {
- t.Errorf("knownFormats is out of date; please run with -v to regenerate")
+ t.Errorf("knownFormats is out of date; please 'go test -v fmt_test.go > foo', then extract new definition of knownFormats from foo")
}
}
@@ -419,7 +419,7 @@ func stringVal(tv types.TypeAndValue) (string, bool) {
// formatIter iterates through the string s in increasing
// index order and calls f for each format specifier '%..v'.
// The arguments for f describe the specifier's index range.
-// If a format specifier contains a "*", f is called with
+// If a format specifier contains a "*", f is called with
// the index range for "*" alone, before being called for
// the entire specifier. The result of f is the index of
// the rune at which iteration continues.
@@ -571,9 +571,14 @@ var knownFormats = map[string]string{
"*cmd/compile/internal/ssa.Block %s": "",
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
+ "*cmd/compile/internal/ssa.Func %v": "",
+ "*cmd/compile/internal/ssa.LocalSlot %+v": "",
+ "*cmd/compile/internal/ssa.LocalSlot %v": "",
+ "*cmd/compile/internal/ssa.Register %s": "",
"*cmd/compile/internal/ssa.SparseTreeNode %v": "",
"*cmd/compile/internal/ssa.Value %s": "",
"*cmd/compile/internal/ssa.Value %v": "",
+ "*cmd/compile/internal/ssa.VarLoc %v": "",
"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
"*cmd/compile/internal/types.Field %p": "",
"*cmd/compile/internal/types.Field %v": "",
@@ -592,25 +597,30 @@ var knownFormats = map[string]string{
"*cmd/compile/internal/types.Type %p": "",
"*cmd/compile/internal/types.Type %s": "",
"*cmd/compile/internal/types.Type %v": "",
+ "*cmd/internal/dwarf.Location %#v": "",
"*cmd/internal/obj.Addr %v": "",
"*cmd/internal/obj.LSym %v": "",
- "*cmd/internal/obj.Prog %s": "",
"*math/big.Int %#x": "",
"*math/big.Int %s": "",
"[16]byte %x": "",
"[]*cmd/compile/internal/gc.Node %v": "",
"[]*cmd/compile/internal/gc.Sig %#v": "",
"[]*cmd/compile/internal/ssa.Value %v": "",
+ "[][]cmd/compile/internal/ssa.SlotID %v": "",
"[]byte %s": "",
"[]byte %x": "",
"[]cmd/compile/internal/ssa.Edge %v": "",
"[]cmd/compile/internal/ssa.ID %v": "",
+ "[]cmd/compile/internal/ssa.VarLocList %v": "",
+ "[]cmd/compile/internal/syntax.token %s": "",
"[]string %v": "",
"bool %v": "",
"byte %08b": "",
"byte %c": "",
"cmd/compile/internal/arm.shift %d": "",
"cmd/compile/internal/gc.Class %d": "",
+ "cmd/compile/internal/gc.Class %s": "",
+ "cmd/compile/internal/gc.Class %v": "",
"cmd/compile/internal/gc.Ctype %d": "",
"cmd/compile/internal/gc.Ctype %v": "",
"cmd/compile/internal/gc.Level %d": "",
@@ -620,21 +630,25 @@ var knownFormats = map[string]string{
"cmd/compile/internal/gc.Nodes %.v": "",
"cmd/compile/internal/gc.Nodes %v": "",
"cmd/compile/internal/gc.Op %#v": "",
+ "cmd/compile/internal/gc.Op %d": "",
"cmd/compile/internal/gc.Op %v": "",
"cmd/compile/internal/gc.Val %#v": "",
"cmd/compile/internal/gc.Val %T": "",
"cmd/compile/internal/gc.Val %v": "",
"cmd/compile/internal/gc.fmtMode %d": "",
"cmd/compile/internal/gc.initKind %d": "",
+ "cmd/compile/internal/gc.locID %v": "",
"cmd/compile/internal/ssa.BranchPrediction %d": "",
"cmd/compile/internal/ssa.Edge %v": "",
"cmd/compile/internal/ssa.GCNode %v": "",
"cmd/compile/internal/ssa.ID %d": "",
- "cmd/compile/internal/ssa.LocalSlot %v": "",
- "cmd/compile/internal/ssa.Location %v": "",
+ "cmd/compile/internal/ssa.ID %v": "",
+ "cmd/compile/internal/ssa.LocalSlot %s": "",
+ "cmd/compile/internal/ssa.Location %s": "",
"cmd/compile/internal/ssa.Op %s": "",
"cmd/compile/internal/ssa.Op %v": "",
"cmd/compile/internal/ssa.ValAndOff %s": "",
+ "cmd/compile/internal/ssa.VarLocList %v": "",
"cmd/compile/internal/ssa.rbrank %d": "",
"cmd/compile/internal/ssa.regMask %d": "",
"cmd/compile/internal/ssa.register %d": "",
@@ -648,6 +662,7 @@ var knownFormats = map[string]string{
"cmd/compile/internal/types.EType %d": "",
"cmd/compile/internal/types.EType %s": "",
"cmd/compile/internal/types.EType %v": "",
+ "cmd/internal/dwarf.Location %#v": "",
"cmd/internal/src.Pos %s": "",
"cmd/internal/src.Pos %v": "",
"error %v": "",
@@ -670,6 +685,7 @@ var knownFormats = map[string]string{
"int32 %x": "",
"int64 %+d": "",
"int64 %-10d": "",
+ "int64 %.5d": "",
"int64 %X": "",
"int64 %d": "",
"int64 %v": "",
@@ -687,6 +703,7 @@ var knownFormats = map[string]string{
"rune %c": "",
"string %-*s": "",
"string %-16s": "",
+ "string %-6s": "",
"string %.*s": "",
"string %q": "",
"string %s": "",
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index e294bce66b9..df0a69a4417 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -14,14 +14,14 @@ import (
// no floating point in note handlers on Plan 9
var isPlan9 = objabi.GOOS == "plan9"
-// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
// See runtime/mkduff.go.
const (
dzBlocks = 16 // number of MOV/ADD blocks
dzBlockLen = 4 // number of clears per block
dzBlockSize = 19 // size of instructions in a single block
dzMovSize = 4 // size of single MOV instruction w/ offset
- dzAddSize = 4 // size of single ADD instruction
+ dzLeaqSize = 4 // size of single LEAQ instruction
dzClearStep = 16 // number of bytes cleared by each MOV instruction
dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
@@ -35,7 +35,7 @@ func dzOff(b int64) int64 {
off -= b / dzClearLen * dzBlockSize
tailLen := b % dzClearLen
if tailLen >= dzClearStep {
- off -= dzAddSize + dzMovSize*(tailLen/dzClearStep)
+ off -= dzLeaqSize + dzMovSize*(tailLen/dzClearStep)
}
return off
}
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 2d7727b2700..ce322e5e990 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -117,7 +117,7 @@ func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
return p
}
-// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
// See runtime/mkduff.go.
func duffStart(size int64) int64 {
x, _ := duff(size)
@@ -140,7 +140,7 @@ func duff(size int64) (int64, int64) {
off := dzBlockSize * (dzBlocks - blocks)
var adj int64
if steps != 0 {
- off -= dzAddSize
+ off -= dzLeaqSize
off -= dzMovSize * steps
adj -= dzClearStep * (dzBlockLen - steps)
}
@@ -494,6 +494,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
x := v.Reg()
+
+ // If flags aren't live (indicated by v.Aux == nil),
+ // then we can rewrite MOV $0, AX into XOR AX, AX.
+ if v.AuxInt == 0 && v.Aux == nil {
+ p := s.Prog(x86.AXORL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ break
+ }
+
asm := v.Op.Asm()
// Use MOVL to move a small constant into a register
// when the constant is positive and fits into 32 bits.
@@ -506,11 +518,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = x
- // If flags are live at this instruction, suppress the
- // MOV $0,AX -> XOR AX,AX optimization.
- if v.Aux != nil {
- p.Mark |= x86.PRESERVEFLAGS
- }
case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
x := v.Reg()
p := s.Prog(v.Op.Asm())
@@ -525,7 +532,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8:
+ case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
@@ -573,7 +580,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
- case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8:
+ case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
@@ -614,6 +621,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Scale = 1
p.To.Index = i
gc.AddAux(&p.To, v)
+ case ssa.OpAMD64ADDQconstmem, ssa.OpAMD64ADDLconstmem:
+ sc := v.AuxValAndOff()
+ off := sc.Off()
+ val := sc.Val()
+ if val == 1 {
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconstmem {
+ asm = x86.AINCQ
+ } else {
+ asm = x86.AINCL
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, off)
+ } else {
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = val
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, off)
+ }
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
@@ -655,6 +685,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Break false dependency on destination register.
opregreg(s, x86.AXORPS, r, r)
opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
+ case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i:
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpAMD64ADDQmem, ssa.OpAMD64ADDLmem, ssa.OpAMD64SUBQmem, ssa.OpAMD64SUBLmem,
ssa.OpAMD64ANDQmem, ssa.OpAMD64ANDLmem, ssa.OpAMD64ORQmem, ssa.OpAMD64ORLmem,
ssa.OpAMD64XORQmem, ssa.OpAMD64XORLmem, ssa.OpAMD64ADDSDmem, ssa.OpAMD64ADDSSmem,
@@ -673,9 +715,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
adj := duffAdj(v.AuxInt)
var p *obj.Prog
if adj != 0 {
- p = s.Prog(x86.AADDQ)
- p.From.Type = obj.TYPE_CONST
+ p = s.Prog(x86.ALEAQ)
+ p.From.Type = obj.TYPE_MEM
p.From.Offset = adj
+ p.From.Reg = x86.REG_DI
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_DI
}
@@ -695,7 +738,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
- case ssa.OpCopy, ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
+ case ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert:
+ if v.Args[0].Reg() != v.Reg() {
+ v.Fatalf("MOVXconvert should be a no-op")
+ }
+ case ssa.OpCopy: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
if v.Type.IsMemory() {
return
}
@@ -755,6 +802,34 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
s.Call(v)
+
+ case ssa.OpAMD64LoweredGetCallerPC:
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = -8 // PC is stored 8 bytes below first parameter.
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ mov := x86.AMOVQ
+ if gc.Widthptr == 4 {
+ mov = x86.AMOVL
+ }
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
@@ -777,6 +852,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.OpAMD64ROUNDSD:
+ p := s.Prog(v.Op.Asm())
+ val := v.AuxInt
+ // 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc
+ if val != 0 && val != 1 && val != 2 && val != 3 {
+ v.Fatalf("Invalid rounding mode")
+ }
+ p.From.Offset = val
+ p.From.Type = obj.TYPE_CONST
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL:
if v.Args[0].Reg() != v.Reg() {
// POPCNT on Intel has a false dependency on the destination register.
@@ -792,6 +879,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+
case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
@@ -803,6 +891,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.OpAMD64SETEQmem, ssa.OpAMD64SETNEmem,
+ ssa.OpAMD64SETLmem, ssa.OpAMD64SETLEmem,
+ ssa.OpAMD64SETGmem, ssa.OpAMD64SETGEmem,
+ ssa.OpAMD64SETBmem, ssa.OpAMD64SETBEmem,
+ ssa.OpAMD64SETAmem, ssa.OpAMD64SETAEmem:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+
case ssa.OpAMD64SETNEF:
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
@@ -838,7 +936,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64LoweredNilCheck:
// Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
- // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
+ // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
// but it doesn't have false dependency on AX.
// Or maybe allocate an output register and use MOVL (reg),reg2 ?
// That trades clobbering flags for clobbering a register.
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 93abee3da0f..300672d9cf8 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -13,6 +13,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
+ "cmd/internal/objabi"
)
// loadByType returns the load instruction of the given type.
@@ -184,6 +185,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARMSUBD,
ssa.OpARMMULF,
ssa.OpARMMULD,
+ ssa.OpARMNMULF,
+ ssa.OpARMNMULD,
ssa.OpARMDIVF,
ssa.OpARMDIVD:
r := v.Reg()
@@ -195,6 +198,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
+ case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD:
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ if r != r0 {
+ v.Fatalf("result and addend are not in the same register: %v", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
case ssa.OpARMADDS,
ssa.OpARMSUBS:
r := v.Reg0()
@@ -242,6 +259,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
+ case ssa.OpARMBFX, ssa.OpARMBFXU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpARMADDconst,
ssa.OpARMADCconst,
ssa.OpARMSUBconst,
@@ -402,7 +427,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REGREG
p.To.Reg = v.Reg0() // high 32-bit
p.To.Offset = int64(v.Reg1()) // low 32-bit
- case ssa.OpARMMULA:
+ case ssa.OpARMMULA, ssa.OpARMMULS:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
@@ -449,17 +474,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- case ssa.OpARMCMPshiftLL:
+ case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
- case ssa.OpARMCMPshiftRL:
+ case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
- case ssa.OpARMCMPshiftRA:
+ case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
- case ssa.OpARMCMPshiftLLreg:
+ case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
- case ssa.OpARMCMPshiftRLreg:
+ case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
- case ssa.OpARMCMPshiftRAreg:
+ case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
case ssa.OpARMMOVWaddr:
p := s.Prog(arm.AMOVW)
@@ -477,10 +502,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Aux.(type) {
default:
v.Fatalf("aux is of unknown type %T", v.Aux)
- case *ssa.ExternSymbol:
+ case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ssa.ArgSymbol, *ssa.AutoSymbol:
+ case *gc.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -516,7 +541,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
- case ssa.OpARMMOVWloadidx:
+ case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
// this is just shift 0 bits
fallthrough
case ssa.OpARMMOVWloadshiftLL:
@@ -528,7 +553,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpARMMOVWloadshiftRA:
p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
p.From.Reg = v.Args[0].Reg()
- case ssa.OpARMMOVWstoreidx:
+ case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
// this is just shift 0 bits
fallthrough
case ssa.OpARMMOVWstoreshiftLL:
@@ -580,6 +605,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
default:
}
}
+ if objabi.GOARM >= 6 {
+ // generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
+ return
+ }
fallthrough
case ssa.OpARMMVN,
ssa.OpARMCLZ,
@@ -754,6 +784,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpARMLoweredGetClosurePtr:
// Closure pointer is R7 (arm.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARMLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpARMFlagEQ,
ssa.OpARMFlagLT_ULT,
ssa.OpARMFlagLT_UGT,
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index 52a8e3f3e37..f7b3851398f 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -31,13 +31,18 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
+ if cnt%(2*int64(gc.Widthptr)) != 0 {
+ p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+ off += int64(gc.Widthptr)
+ cnt -= int64(gc.Widthptr)
+ }
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
- p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, arm64.REGRT1, 0)
+ p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
- p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
} else {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, arm64.REGTMP, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 0f9e82c727d..6fa01912f50 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -273,10 +273,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Aux.(type) {
default:
v.Fatalf("aux is of unknown type %T", v.Aux)
- case *ssa.ExternSymbol:
+ case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ssa.ArgSymbol, *ssa.AutoSymbol:
+ case *gc.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -324,6 +324,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
+ case ssa.OpARM64STP:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = v.Args[1].Reg()
+ p.From.Offset = int64(v.Args[2].Reg())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezero,
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
@@ -334,6 +342,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
+ case ssa.OpARM64MOVQstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
case ssa.OpARM64LoweredAtomicExchange64,
ssa.OpARM64LoweredAtomicExchange32:
// LDAXR (Rarg0), Rout
@@ -555,34 +571,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = arm64.COND_LO
p.Reg = v.Args[0].Reg()
- p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r1}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1})
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64DUFFZERO:
- // runtime.duffzero expects start address - 8 in R16
- p := s.Prog(arm64.ASUB)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 8
- p.Reg = v.Args[0].Reg()
- p.To.Type = obj.TYPE_REG
- p.To.Reg = arm64.REG_R16
- p = s.Prog(obj.ADUFFZERO)
+ // runtime.duffzero expects start address in R16
+ p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredZero:
- // MOVD.P ZR, 8(R16)
+ // STP.P (ZR,ZR), 16(R16)
// CMP Rarg1, R16
// BLE -2(PC)
- // arg1 is the address of the last element to zero
- p := s.Prog(arm64.AMOVD)
+ // arg1 is the address of the last 16-byte unit to zero
+ p := s.Prog(arm64.ASTP)
p.Scond = arm64.C_XPOST
- p.From.Type = obj.TYPE_REG
+ p.From.Type = obj.TYPE_REGREG
p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REG_R16
- p.To.Offset = 8
+ p.To.Offset = 16
p2 := s.Prog(arm64.ACMP)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
@@ -655,6 +666,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARM64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpARM64FlagEQ,
ssa.OpARM64FlagLT_ULT,
ssa.OpARM64FlagLT_UGT,
@@ -686,20 +705,22 @@ var condBits = map[ssa.Op]int16{
var blockJump = map[ssa.BlockKind]struct {
asm, invasm obj.As
}{
- ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE},
- ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ},
- ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE},
- ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT},
- ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT},
- ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE},
- ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
- ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
- ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
- ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
- ssa.BlockARM64Z: {arm64.ACBZ, arm64.ACBNZ},
- ssa.BlockARM64NZ: {arm64.ACBNZ, arm64.ACBZ},
- ssa.BlockARM64ZW: {arm64.ACBZW, arm64.ACBNZW},
- ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW},
+ ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE},
+ ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ},
+ ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE},
+ ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT},
+ ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
+ ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
+ ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
+ ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
+ ssa.BlockARM64Z: {arm64.ACBZ, arm64.ACBNZ},
+ ssa.BlockARM64NZ: {arm64.ACBNZ, arm64.ACBZ},
+ ssa.BlockARM64ZW: {arm64.ACBZW, arm64.ACBNZW},
+ ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW},
+ ssa.BlockARM64TBZ: {arm64.ATBZ, arm64.ATBNZ},
+ ssa.BlockARM64TBNZ: {arm64.ATBNZ, arm64.ATBZ},
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
@@ -770,6 +791,35 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Type = obj.TYPE_REG
p.From.Reg = b.Control.Reg()
}
+ case ssa.BlockARM64TBZ, ssa.BlockARM64TBNZ:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Prog(jmp.invasm)
+ p.To.Type = obj.TYPE_BRANCH
+ p.From.Offset = b.Aux.(int64)
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = b.Control.Reg()
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ case b.Succs[1].Block():
+ p = s.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ p.From.Offset = b.Aux.(int64)
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = b.Control.Reg()
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ default:
+ p = s.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ p.From.Offset = b.Aux.(int64)
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = b.Control.Reg()
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ q := s.Prog(obj.AJMP)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
+ }
default:
b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
index 0b4c9c7b3f6..e98df71b34b 100644
--- a/src/cmd/compile/internal/gc/alg.go
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -292,7 +292,7 @@ func genhash(sym *types.Sym, t *types.Type) {
dumplist("genhash body", fn.Nbody)
}
- funcbody(fn)
+ funcbody()
Curfn = fn
fn.Func.SetDupok(true)
fn = typecheck(fn, Etop)
@@ -476,7 +476,7 @@ func geneq(sym *types.Sym, t *types.Type) {
dumplist("geneq body", fn.Nbody)
}
- funcbody(fn)
+ funcbody()
Curfn = fn
fn.Func.SetDupok(true)
fn = typecheck(fn, Etop)
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
index f29c587877b..dc2d04a8ed8 100644
--- a/src/cmd/compile/internal/gc/align.go
+++ b/src/cmd/compile/internal/gc/align.go
@@ -176,6 +176,7 @@ func dowidth(t *types.Type) {
}
t.Width = 0
+ t.Align = 1
return
}
@@ -290,6 +291,7 @@ func dowidth(t *types.Type) {
case TFORW: // should have been filled in
if !t.Broke() {
+ t.SetBroke(true)
yyerror("invalid recursive type %v", t)
}
w = 1 // anything will do
diff --git a/src/cmd/compile/internal/gc/asm_test.go b/src/cmd/compile/internal/gc/asm_test.go
index 08ec638f44e..a7b817da257 100644
--- a/src/cmd/compile/internal/gc/asm_test.go
+++ b/src/cmd/compile/internal/gc/asm_test.go
@@ -18,6 +18,52 @@ import (
"testing"
)
+// This file contains code generation tests.
+//
+// Each test is defined in a variable of type asmTest. Tests are
+// architecture-specific, and they are grouped in arrays of tests, one
+// for each architecture.
+//
+// Each asmTest consists of a function to compile, an array of
+// positive regexps that must match the generated assembly and
+// an array of negative regexps that must not match generated assembly.
+// For example, the following amd64 test
+//
+// {
+// fn: `
+// func f0(x int) int {
+// return x * 64
+// }
+// `,
+// pos: []string{"\tSHLQ\t[$]6,"},
+// neg: []string{"MULQ"}
+// }
+//
+// verifies that the code the compiler generates for a multiplication
+// by 64 contains a 'SHLQ' instruction and does not contain a MULQ.
+//
+// Since all the tests for a given architecture are dumped in the same
+// file, the function names must be unique. As a workaround for this
+// restriction, the test harness supports the use of a '$' placeholder
+// for function names. The func f0 above can be also written as
+//
+// {
+// fn: `
+// func $(x int) int {
+// return x * 64
+// }
+// `,
+// pos: []string{"\tSHLQ\t[$]6,"},
+// neg: []string{"MULQ"}
+// }
+//
+// Each '$'-function will be given a unique name of form f<N>_<arch>,
+// where <N> is the test index in the test array, and <arch> is the
+// test's architecture.
+//
+// It is allowed to mix named and unnamed functions in the same test
+// array; the named functions will retain their original names.
+
// TestAssembly checks to make sure the assembly generated for
// functions contains certain expected instructions.
func TestAssembly(t *testing.T) {
@@ -41,8 +87,13 @@ func TestAssembly(t *testing.T) {
asm := ats.compileToAsm(tt, dir)
- for _, at := range ats.tests {
- funcName := nameRegexp.FindString(at.function)[len("func "):]
+ for i, at := range ats.tests {
+ var funcName string
+ if strings.Contains(at.fn, "func $") {
+ funcName = fmt.Sprintf("f%d_%s", i, ats.arch)
+ } else {
+ funcName = nameRegexp.FindString(at.fn)[len("func "):]
+ }
fa := funcAsm(tt, asm, funcName)
if fa != "" {
at.verifyAsm(tt, fa)
@@ -74,17 +125,23 @@ func funcAsm(t *testing.T, asm string, funcName string) string {
}
type asmTest struct {
- // function to compile, must be named fX,
- // where X is this test's index in asmTests.tests.
- function string
- // regexps that must match the generated assembly
- regexps []string
+ // function to compile
+ fn string
+ // regular expressions that must match the generated assembly
+ pos []string
+ // regular expressions that must not match the generated assembly
+ neg []string
}
func (at asmTest) verifyAsm(t *testing.T, fa string) {
- for _, r := range at.regexps {
+ for _, r := range at.pos {
if b, err := regexp.MatchString(r, fa); !b || err != nil {
- t.Errorf("expected:%s\ngo:%s\nasm:%s\n", r, at.function, fa)
+ t.Errorf("expected:%s\ngo:%s\nasm:%s\n", r, at.fn, fa)
+ }
+ }
+ for _, r := range at.neg {
+ if b, err := regexp.MatchString(r, fa); b || err != nil {
+ t.Errorf("not expected:%s\ngo:%s\nasm:%s\n", r, at.fn, fa)
}
}
}
@@ -103,8 +160,9 @@ func (ats *asmTests) generateCode() []byte {
fmt.Fprintf(&buf, "import %q\n", s)
}
- for _, t := range ats.tests {
- fmt.Fprintln(&buf, t.function)
+ for i, t := range ats.tests {
+ function := strings.Replace(t.fn, "func $", fmt.Sprintf("func f%d_%s", i, ats.arch), 1)
+ fmt.Fprintln(&buf, function)
}
return buf.Bytes()
@@ -166,7 +224,7 @@ var allAsmTests = []*asmTests{
{
arch: "amd64",
os: "linux",
- imports: []string{"encoding/binary", "math/bits", "unsafe"},
+ imports: []string{"encoding/binary", "math", "math/bits", "unsafe", "runtime"},
tests: linuxAMD64Tests,
},
{
@@ -178,13 +236,13 @@ var allAsmTests = []*asmTests{
{
arch: "s390x",
os: "linux",
- imports: []string{"encoding/binary", "math/bits"},
+ imports: []string{"encoding/binary", "math", "math/bits"},
tests: linuxS390XTests,
},
{
arch: "arm",
os: "linux",
- imports: []string{"math/bits"},
+ imports: []string{"math/bits", "runtime"},
tests: linuxARMTests,
},
{
@@ -200,173 +258,196 @@ var allAsmTests = []*asmTests{
tests: linuxMIPSTests,
},
{
- arch: "ppc64le",
+ arch: "mips64",
os: "linux",
- tests: linuxPPC64LETests,
+ tests: linuxMIPS64Tests,
+ },
+ {
+ arch: "ppc64le",
+ os: "linux",
+ imports: []string{"encoding/binary", "math", "math/bits"},
+ tests: linuxPPC64LETests,
+ },
+ {
+ arch: "amd64",
+ os: "plan9",
+ tests: plan9AMD64Tests,
},
}
var linuxAMD64Tests = []*asmTest{
+ // multiplication by powers of two
{
- `
- func f0(x int) int {
- return x * 64
+ fn: `
+ func $(n int) int {
+ return n * 64
}
`,
- []string{"\tSHLQ\t\\$6,"},
+ pos: []string{"\tSHLQ\t\\$6,"},
+ neg: []string{"IMULQ"},
},
{
- `
- func f1(x int) int {
+ fn: `
+ func $(n int) int {
+ return -128*n
+ }
+ `,
+ pos: []string{"SHLQ"},
+ neg: []string{"IMULQ"},
+ },
+
+ {
+ fn: `
+ func $(x int) int {
return x * 96
}
`,
- []string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"},
+ pos: []string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"},
},
// Load-combining tests.
{
- `
+ fn: `
func f2(b []byte) uint64 {
return binary.LittleEndian.Uint64(b)
}
`,
- []string{"\tMOVQ\t\\(.*\\),"},
+ pos: []string{"\tMOVQ\t\\(.*\\),"},
},
{
- `
+ fn: `
func f3(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
`,
- []string{"\tMOVQ\t\\(.*\\)\\(.*\\*1\\),"},
+ pos: []string{"\tMOVQ\t\\(.*\\)\\(.*\\*1\\),"},
},
{
- `
+ fn: `
func f4(b []byte) uint32 {
return binary.LittleEndian.Uint32(b)
}
`,
- []string{"\tMOVL\t\\(.*\\),"},
+ pos: []string{"\tMOVL\t\\(.*\\),"},
},
{
- `
+ fn: `
func f5(b []byte, i int) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
`,
- []string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
+ pos: []string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
},
{
- `
+ fn: `
func f6(b []byte) uint64 {
return binary.BigEndian.Uint64(b)
}
`,
- []string{"\tBSWAPQ\t"},
+ pos: []string{"\tBSWAPQ\t"},
},
{
- `
+ fn: `
func f7(b []byte, i int) uint64 {
return binary.BigEndian.Uint64(b[i:])
}
`,
- []string{"\tBSWAPQ\t"},
+ pos: []string{"\tBSWAPQ\t"},
},
{
- `
+ fn: `
func f8(b []byte, v uint64) {
binary.BigEndian.PutUint64(b, v)
}
`,
- []string{"\tBSWAPQ\t"},
+ pos: []string{"\tBSWAPQ\t"},
},
{
- `
+ fn: `
func f9(b []byte, i int, v uint64) {
binary.BigEndian.PutUint64(b[i:], v)
}
`,
- []string{"\tBSWAPQ\t"},
+ pos: []string{"\tBSWAPQ\t"},
},
{
- `
+ fn: `
func f10(b []byte) uint32 {
return binary.BigEndian.Uint32(b)
}
`,
- []string{"\tBSWAPL\t"},
+ pos: []string{"\tBSWAPL\t"},
},
{
- `
+ fn: `
func f11(b []byte, i int) uint32 {
return binary.BigEndian.Uint32(b[i:])
}
`,
- []string{"\tBSWAPL\t"},
+ pos: []string{"\tBSWAPL\t"},
},
{
- `
+ fn: `
func f12(b []byte, v uint32) {
binary.BigEndian.PutUint32(b, v)
}
`,
- []string{"\tBSWAPL\t"},
+ pos: []string{"\tBSWAPL\t"},
},
{
- `
+ fn: `
func f13(b []byte, i int, v uint32) {
binary.BigEndian.PutUint32(b[i:], v)
}
`,
- []string{"\tBSWAPL\t"},
+ pos: []string{"\tBSWAPL\t"},
},
{
- `
+ fn: `
func f14(b []byte) uint16 {
return binary.BigEndian.Uint16(b)
}
`,
- []string{"\tROLW\t\\$8,"},
+ pos: []string{"\tROLW\t\\$8,"},
},
{
- `
+ fn: `
func f15(b []byte, i int) uint16 {
return binary.BigEndian.Uint16(b[i:])
}
`,
- []string{"\tROLW\t\\$8,"},
+ pos: []string{"\tROLW\t\\$8,"},
},
{
- `
+ fn: `
func f16(b []byte, v uint16) {
binary.BigEndian.PutUint16(b, v)
}
`,
- []string{"\tROLW\t\\$8,"},
+ pos: []string{"\tROLW\t\\$8,"},
},
{
- `
+ fn: `
func f17(b []byte, i int, v uint16) {
binary.BigEndian.PutUint16(b[i:], v)
}
`,
- []string{"\tROLW\t\\$8,"},
+ pos: []string{"\tROLW\t\\$8,"},
},
// Structure zeroing. See issue #18370.
{
- `
+ fn: `
type T1 struct {
a, b, c int
}
- func f18(t *T1) {
+ func $(t *T1) {
*t = T1{}
}
`,
- []string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)"},
+ pos: []string{"\tXORPS\tX., X", "\tMOVUPS\tX., \\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)"},
},
// SSA-able composite literal initialization. Issue 18872.
{
- `
+ fn: `
type T18872 struct {
a, b, c, d int
}
@@ -375,11 +456,11 @@ var linuxAMD64Tests = []*asmTest{
*p = T18872{1, 2, 3, 4}
}
`,
- []string{"\tMOVQ\t[$]1", "\tMOVQ\t[$]2", "\tMOVQ\t[$]3", "\tMOVQ\t[$]4"},
+ pos: []string{"\tMOVQ\t[$]1", "\tMOVQ\t[$]2", "\tMOVQ\t[$]3", "\tMOVQ\t[$]4"},
},
// Also test struct containing pointers (this was special because of write barriers).
{
- `
+ fn: `
type T2 struct {
a, b, c *int
}
@@ -387,108 +468,108 @@ var linuxAMD64Tests = []*asmTest{
*t = T2{}
}
`,
- []string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)", "\tCALL\truntime\\.writebarrierptr\\(SB\\)"},
+ pos: []string{"\tXORPS\tX., X", "\tMOVUPS\tX., \\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)", "\tCALL\truntime\\.(writebarrierptr|gcWriteBarrier)\\(SB\\)"},
},
// Rotate tests
{
- `
+ fn: `
func f20(x uint64) uint64 {
return x<<7 | x>>57
}
`,
- []string{"\tROLQ\t[$]7,"},
+ pos: []string{"\tROLQ\t[$]7,"},
},
{
- `
+ fn: `
func f21(x uint64) uint64 {
return x<<7 + x>>57
}
`,
- []string{"\tROLQ\t[$]7,"},
+ pos: []string{"\tROLQ\t[$]7,"},
},
{
- `
+ fn: `
func f22(x uint64) uint64 {
return x<<7 ^ x>>57
}
`,
- []string{"\tROLQ\t[$]7,"},
+ pos: []string{"\tROLQ\t[$]7,"},
},
{
- `
+ fn: `
func f23(x uint32) uint32 {
return x<<7 + x>>25
}
`,
- []string{"\tROLL\t[$]7,"},
+ pos: []string{"\tROLL\t[$]7,"},
},
{
- `
+ fn: `
func f24(x uint32) uint32 {
return x<<7 | x>>25
}
`,
- []string{"\tROLL\t[$]7,"},
+ pos: []string{"\tROLL\t[$]7,"},
},
{
- `
+ fn: `
func f25(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
- []string{"\tROLL\t[$]7,"},
+ pos: []string{"\tROLL\t[$]7,"},
},
{
- `
+ fn: `
func f26(x uint16) uint16 {
return x<<7 + x>>9
}
`,
- []string{"\tROLW\t[$]7,"},
+ pos: []string{"\tROLW\t[$]7,"},
},
{
- `
+ fn: `
func f27(x uint16) uint16 {
return x<<7 | x>>9
}
`,
- []string{"\tROLW\t[$]7,"},
+ pos: []string{"\tROLW\t[$]7,"},
},
{
- `
+ fn: `
func f28(x uint16) uint16 {
return x<<7 ^ x>>9
}
`,
- []string{"\tROLW\t[$]7,"},
+ pos: []string{"\tROLW\t[$]7,"},
},
{
- `
+ fn: `
func f29(x uint8) uint8 {
return x<<7 + x>>1
}
`,
- []string{"\tROLB\t[$]7,"},
+ pos: []string{"\tROLB\t[$]7,"},
},
{
- `
+ fn: `
func f30(x uint8) uint8 {
return x<<7 | x>>1
}
`,
- []string{"\tROLB\t[$]7,"},
+ pos: []string{"\tROLB\t[$]7,"},
},
{
- `
+ fn: `
func f31(x uint8) uint8 {
return x<<7 ^ x>>1
}
`,
- []string{"\tROLB\t[$]7,"},
+ pos: []string{"\tROLB\t[$]7,"},
},
// Rotate after inlining (see issue 18254).
{
- `
+ fn: `
func f32(x uint32) uint32 {
return g(x, 7)
}
@@ -496,46 +577,46 @@ var linuxAMD64Tests = []*asmTest{
return x<<k | x>>(32-k)
}
`,
- []string{"\tROLL\t[$]7,"},
+ pos: []string{"\tROLL\t[$]7,"},
},
{
- `
+ fn: `
func f33(m map[int]int) int {
return m[5]
}
`,
- []string{"\tMOVQ\t[$]5,"},
+ pos: []string{"\tMOVQ\t[$]5,"},
},
// Direct use of constants in fast map access calls. Issue 19015.
{
- `
+ fn: `
func f34(m map[int]int) bool {
_, ok := m[5]
return ok
}
`,
- []string{"\tMOVQ\t[$]5,"},
+ pos: []string{"\tMOVQ\t[$]5,"},
},
{
- `
+ fn: `
func f35(m map[string]int) int {
return m["abc"]
}
`,
- []string{"\"abc\""},
+ pos: []string{"\"abc\""},
},
{
- `
+ fn: `
func f36(m map[string]int) bool {
_, ok := m["abc"]
return ok
}
`,
- []string{"\"abc\""},
+ pos: []string{"\"abc\""},
},
// Bit test ops on amd64, issue 18943.
{
- `
+ fn: `
func f37(a, b uint64) int {
if a&(1<<(b&63)) != 0 {
return 1
@@ -543,18 +624,18 @@ var linuxAMD64Tests = []*asmTest{
return -1
}
`,
- []string{"\tBTQ\t"},
+ pos: []string{"\tBTQ\t"},
},
{
- `
+ fn: `
func f38(a, b uint64) bool {
return a&(1<<(b&63)) != 0
}
`,
- []string{"\tBTQ\t"},
+ pos: []string{"\tBTQ\t"},
},
{
- `
+ fn: `
func f39(a uint64) int {
if a&(1<<60) != 0 {
return 1
@@ -562,1107 +643,2408 @@ var linuxAMD64Tests = []*asmTest{
return -1
}
`,
- []string{"\tBTQ\t\\$60"},
+ pos: []string{"\tBTQ\t\\$60"},
},
{
- `
+ fn: `
func f40(a uint64) bool {
return a&(1<<60) != 0
}
`,
- []string{"\tBTQ\t\\$60"},
+ pos: []string{"\tBTQ\t\\$60"},
},
// Intrinsic tests for math/bits
{
- `
+ fn: `
func f41(a uint64) int {
return bits.TrailingZeros64(a)
}
`,
- []string{"\tBSFQ\t", "\tMOVL\t\\$64,", "\tCMOVQEQ\t"},
+ pos: []string{"\tBSFQ\t", "\tMOVL\t\\$64,", "\tCMOVQEQ\t"},
},
{
- `
+ fn: `
func f42(a uint32) int {
return bits.TrailingZeros32(a)
}
`,
- []string{"\tBSFQ\t", "\tORQ\t[^$]", "\tMOVQ\t\\$4294967296,"},
+ pos: []string{"\tBSFQ\t", "\tORQ\t[^$]", "\tMOVQ\t\\$4294967296,"},
},
{
- `
+ fn: `
func f43(a uint16) int {
return bits.TrailingZeros16(a)
}
`,
- []string{"\tBSFQ\t", "\tORQ\t\\$65536,"},
+ pos: []string{"\tBSFQ\t", "\tORQ\t\\$65536,"},
},
{
- `
+ fn: `
func f44(a uint8) int {
return bits.TrailingZeros8(a)
}
`,
- []string{"\tBSFQ\t", "\tORQ\t\\$256,"},
+ pos: []string{"\tBSFQ\t", "\tORQ\t\\$256,"},
},
{
- `
+ fn: `
func f45(a uint64) uint64 {
return bits.ReverseBytes64(a)
}
`,
- []string{"\tBSWAPQ\t"},
+ pos: []string{"\tBSWAPQ\t"},
},
{
- `
+ fn: `
func f46(a uint32) uint32 {
return bits.ReverseBytes32(a)
}
`,
- []string{"\tBSWAPL\t"},
+ pos: []string{"\tBSWAPL\t"},
},
{
- `
+ fn: `
func f47(a uint16) uint16 {
return bits.ReverseBytes16(a)
}
`,
- []string{"\tROLW\t\\$8,"},
+ pos: []string{"\tROLW\t\\$8,"},
},
{
- `
+ fn: `
func f48(a uint64) int {
return bits.Len64(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos: []string{"\tBSRQ\t"},
},
{
- `
+ fn: `
func f49(a uint32) int {
return bits.Len32(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos: []string{"\tBSRQ\t"},
},
{
- `
+ fn: `
func f50(a uint16) int {
return bits.Len16(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos: []string{"\tBSRQ\t"},
},
/* see ssa.go
{
- `
+ fn:`
func f51(a uint8) int {
return bits.Len8(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos:[]string{"\tBSRQ\t"},
},
*/
{
- `
+ fn: `
func f52(a uint) int {
return bits.Len(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos: []string{"\tBSRQ\t"},
},
{
- `
+ fn: `
func f53(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos: []string{"\tBSRQ\t"},
},
{
- `
+ fn: `
func f54(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos: []string{"\tBSRQ\t"},
},
{
- `
+ fn: `
func f55(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos: []string{"\tBSRQ\t"},
},
/* see ssa.go
{
- `
+ fn:`
func f56(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos:[]string{"\tBSRQ\t"},
},
*/
{
- `
+ fn: `
func f57(a uint) int {
return bits.LeadingZeros(a)
}
`,
- []string{"\tBSRQ\t"},
+ pos: []string{"\tBSRQ\t"},
},
{
- `
+ fn: `
func pop1(x uint64) int {
return bits.OnesCount64(x)
}`,
- []string{"\tPOPCNTQ\t", "support_popcnt"},
+ pos: []string{"\tPOPCNTQ\t", "support_popcnt"},
},
{
- `
+ fn: `
func pop2(x uint32) int {
return bits.OnesCount32(x)
}`,
- []string{"\tPOPCNTL\t", "support_popcnt"},
+ pos: []string{"\tPOPCNTL\t", "support_popcnt"},
},
{
- `
+ fn: `
func pop3(x uint16) int {
return bits.OnesCount16(x)
}`,
- []string{"\tPOPCNTL\t", "support_popcnt"},
+ pos: []string{"\tPOPCNTL\t", "support_popcnt"},
},
{
- `
+ fn: `
func pop4(x uint) int {
return bits.OnesCount(x)
}`,
- []string{"\tPOPCNTQ\t", "support_popcnt"},
+ pos: []string{"\tPOPCNTQ\t", "support_popcnt"},
},
+ // multiplication merging tests
+ {
+ fn: `
+ func mul1(n int) int {
+ return 15*n + 31*n
+ }`,
+ pos: []string{"\tIMULQ\t[$]46"}, // 46*n
+ },
+ {
+ fn: `
+ func mul2(n int) int {
+ return 5*n + 7*(n+1) + 11*(n+2)
+ }`,
+ pos: []string{"\tIMULQ\t[$]23", "\tADDQ\t[$]29"}, // 23*n + 29
+ },
+ {
+ fn: `
+ func mul3(a, n int) int {
+ return a*n + 19*n
+ }`,
+ pos: []string{"\tADDQ\t[$]19", "\tIMULQ"}, // (a+19)*n
+ },
+ {
+ fn: `
+ func mul4(n int) int {
+ return 23*n - 9*n
+ }`,
+ pos: []string{"\tIMULQ\t[$]14"}, // 14*n
+ },
+ {
+ fn: `
+ func mul5(a, n int) int {
+ return a*n - 19*n
+ }`,
+ pos: []string{"\tADDQ\t[$]-19", "\tIMULQ"}, // (a-19)*n
+ },
+
// see issue 19595.
// We want to merge load+op in f58, but not in f59.
{
- `
+ fn: `
func f58(p, q *int) {
x := *p
*q += x
}`,
- []string{"\tADDQ\t\\("},
+ pos: []string{"\tADDQ\t\\("},
},
{
- `
+ fn: `
func f59(p, q *int) {
x := *p
for i := 0; i < 10; i++ {
*q += x
}
}`,
- []string{"\tADDQ\t[A-Z]"},
+ pos: []string{"\tADDQ\t[A-Z]"},
},
// Floating-point strength reduction
{
- `
+ fn: `
func f60(f float64) float64 {
return f * 2.0
}`,
- []string{"\tADDSD\t"},
+ pos: []string{"\tADDSD\t"},
},
{
- `
+ fn: `
func f62(f float64) float64 {
return f / 16.0
}`,
- []string{"\tMULSD\t"},
+ pos: []string{"\tMULSD\t"},
},
{
- `
+ fn: `
func f63(f float64) float64 {
return f / 0.125
}`,
- []string{"\tMULSD\t"},
+ pos: []string{"\tMULSD\t"},
},
{
- `
+ fn: `
func f64(f float64) float64 {
return f / 0.5
}`,
- []string{"\tADDSD\t"},
+ pos: []string{"\tADDSD\t"},
},
// Check that compare to constant string uses 2/4/8 byte compares
{
- `
+ fn: `
func f65(a string) bool {
return a == "xx"
}`,
- []string{"\tCMPW\t[A-Z]"},
+ pos: []string{"\tCMPW\t[A-Z]"},
},
{
- `
+ fn: `
func f66(a string) bool {
return a == "xxxx"
}`,
- []string{"\tCMPL\t[A-Z]"},
+ pos: []string{"\tCMPL\t[A-Z]"},
},
{
- `
+ fn: `
func f67(a string) bool {
return a == "xxxxxxxx"
}`,
- []string{"\tCMPQ\t[A-Z]"},
+ pos: []string{"\tCMPQ\t[A-Z]"},
},
// Non-constant rotate
{
- `func rot64l(x uint64, y int) uint64 {
+ fn: `func rot64l(x uint64, y int) uint64 {
z := uint(y & 63)
return x << z | x >> (64-z)
}`,
- []string{"\tROLQ\t"},
+ pos: []string{"\tROLQ\t"},
},
{
- `func rot64r(x uint64, y int) uint64 {
+ fn: `func rot64r(x uint64, y int) uint64 {
z := uint(y & 63)
return x >> z | x << (64-z)
}`,
- []string{"\tRORQ\t"},
+ pos: []string{"\tRORQ\t"},
},
{
- `func rot32l(x uint32, y int) uint32 {
+ fn: `func rot32l(x uint32, y int) uint32 {
z := uint(y & 31)
return x << z | x >> (32-z)
}`,
- []string{"\tROLL\t"},
+ pos: []string{"\tROLL\t"},
},
{
- `func rot32r(x uint32, y int) uint32 {
+ fn: `func rot32r(x uint32, y int) uint32 {
z := uint(y & 31)
return x >> z | x << (32-z)
}`,
- []string{"\tRORL\t"},
+ pos: []string{"\tRORL\t"},
},
{
- `func rot16l(x uint16, y int) uint16 {
+ fn: `func rot16l(x uint16, y int) uint16 {
z := uint(y & 15)
return x << z | x >> (16-z)
}`,
- []string{"\tROLW\t"},
+ pos: []string{"\tROLW\t"},
},
{
- `func rot16r(x uint16, y int) uint16 {
+ fn: `func rot16r(x uint16, y int) uint16 {
z := uint(y & 15)
return x >> z | x << (16-z)
}`,
- []string{"\tRORW\t"},
+ pos: []string{"\tRORW\t"},
},
{
- `func rot8l(x uint8, y int) uint8 {
+ fn: `func rot8l(x uint8, y int) uint8 {
z := uint(y & 7)
return x << z | x >> (8-z)
}`,
- []string{"\tROLB\t"},
+ pos: []string{"\tROLB\t"},
},
{
- `func rot8r(x uint8, y int) uint8 {
+ fn: `func rot8r(x uint8, y int) uint8 {
z := uint(y & 7)
return x >> z | x << (8-z)
}`,
- []string{"\tRORB\t"},
+ pos: []string{"\tRORB\t"},
},
// Check that array compare uses 2/4/8 byte compares
{
- `
+ fn: `
func f68(a,b [2]byte) bool {
return a == b
}`,
- []string{"\tCMPW\t[A-Z]"},
+ pos: []string{"\tCMPW\t[A-Z]"},
},
{
- `
+ fn: `
func f69(a,b [3]uint16) bool {
return a == b
}`,
- []string{"\tCMPL\t[A-Z]"},
+ pos: []string{"\tCMPL\t[A-Z]"},
},
{
- `
+ fn: `
func f70(a,b [15]byte) bool {
return a == b
}`,
- []string{"\tCMPQ\t[A-Z]"},
+ pos: []string{"\tCMPQ\t[A-Z]"},
},
{
- `
+ fn: `
func f71(a,b unsafe.Pointer) bool { // This was a TODO in mapaccess1_faststr
return *((*[4]byte)(a)) != *((*[4]byte)(b))
}`,
- []string{"\tCMPL\t[A-Z]"},
+ pos: []string{"\tCMPL\t[A-Z]"},
},
{
// make sure assembly output has matching offset and base register.
- `
+ fn: `
func f72(a, b int) int {
- var x [16]byte // use some frame
- _ = x
+ runtime.GC() // use some frame
return b
}
`,
- []string{"b\\+40\\(SP\\)"},
+ pos: []string{"b\\+24\\(SP\\)"},
+ },
+ {
+ // check load combining
+ fn: `
+ func f73(a, b byte) (byte,byte) {
+ return f73(f73(a,b))
+ }
+ `,
+ pos: []string{"\tMOVW\t"},
+ },
+ {
+ fn: `
+ func f74(a, b uint16) (uint16,uint16) {
+ return f74(f74(a,b))
+ }
+ `,
+ pos: []string{"\tMOVL\t"},
+ },
+ {
+ fn: `
+ func f75(a, b uint32) (uint32,uint32) {
+ return f75(f75(a,b))
+ }
+ `,
+ pos: []string{"\tMOVQ\t"},
+ },
+ {
+ fn: `
+ func f76(a, b uint64) (uint64,uint64) {
+ return f76(f76(a,b))
+ }
+ `,
+ pos: []string{"\tMOVUPS\t"},
+ },
+ // Make sure we don't put pointers in SSE registers across safe points.
+ {
+ fn: `
+ func $(p, q *[2]*int) {
+ a, b := p[0], p[1]
+ runtime.GC()
+ q[0], q[1] = a, b
+ }
+ `,
+ neg: []string{"MOVUPS"},
+ },
+ {
+ // check that stack store is optimized away
+ fn: `
+ func $() int {
+ var x int
+ return *(&x)
+ }
+ `,
+ pos: []string{"TEXT\t.*, [$]0-8"},
+ },
+ // math.Abs using integer registers
+ {
+ fn: `
+ func $(x float64) float64 {
+ return math.Abs(x)
+ }
+ `,
+ pos: []string{"\tSHLQ\t[$]1,", "\tSHRQ\t[$]1,"},
+ },
+ // math.Copysign using integer registers
+ {
+ fn: `
+ func $(x, y float64) float64 {
+ return math.Copysign(x, y)
+ }
+ `,
+ pos: []string{"\tSHLQ\t[$]1,", "\tSHRQ\t[$]1,", "\tSHRQ\t[$]63,", "\tSHLQ\t[$]63,", "\tORQ\t"},
+ },
+ // int <-> fp moves
+ {
+ fn: `
+ func $(x float64) uint64 {
+ return math.Float64bits(x+1) + 1
+ }
+ `,
+ pos: []string{"\tMOVQ\tX.*, [^X].*"},
+ },
+ {
+ fn: `
+ func $(x float32) uint32 {
+ return math.Float32bits(x+1) + 1
+ }
+ `,
+ pos: []string{"\tMOVL\tX.*, [^X].*"},
+ },
+ {
+ fn: `
+ func $(x uint64) float64 {
+ return math.Float64frombits(x+1) + 1
+ }
+ `,
+ pos: []string{"\tMOVQ\t[^X].*, X.*"},
+ },
+ {
+ fn: `
+ func $(x uint32) float32 {
+ return math.Float32frombits(x+1) + 1
+ }
+ `,
+ pos: []string{"\tMOVL\t[^X].*, X.*"},
+ },
+ {
+ fn: `
+ func $(x uint32) bool {
+ return x > 4
+ }
+ `,
+ pos: []string{"\tSETHI\t\\("},
+ },
+ // Check that len() and cap() div by a constant power of two
+ // are compiled into SHRQ.
+ {
+ fn: `
+ func $(a []int) int {
+ return len(a) / 1024
+ }
+ `,
+ pos: []string{"\tSHRQ\t\\$10,"},
+ },
+ {
+ fn: `
+ func $(s string) int {
+ return len(s) / (4097 >> 1)
+ }
+ `,
+ pos: []string{"\tSHRQ\t\\$11,"},
+ },
+ {
+ fn: `
+ func $(a []int) int {
+ return cap(a) / ((1 << 11) + 2048)
+ }
+ `,
+ pos: []string{"\tSHRQ\t\\$12,"},
+ },
+ // Check that len() and cap() mod by a constant power of two
+ // are compiled into ANDQ.
+ {
+ fn: `
+ func $(a []int) int {
+ return len(a) % 1024
+ }
+ `,
+ pos: []string{"\tANDQ\t\\$1023,"},
+ },
+ {
+ fn: `
+ func $(s string) int {
+ return len(s) % (4097 >> 1)
+ }
+ `,
+ pos: []string{"\tANDQ\t\\$2047,"},
+ },
+ {
+ fn: `
+ func $(a []int) int {
+ return cap(a) % ((1 << 11) + 2048)
+ }
+ `,
+ pos: []string{"\tANDQ\t\\$4095,"},
+ },
+ {
+ // Test that small memmove was replaced with direct movs
+ fn: `
+ func $() {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7}
+ copy(x[1:], x[:])
+ }
+ `,
+ neg: []string{"memmove"},
+ },
+ {
+ // Same as above but with different size
+ fn: `
+ func $() {
+ x := [...]byte{1, 2, 3, 4}
+ copy(x[1:], x[:])
+ }
+ `,
+ neg: []string{"memmove"},
+ },
+ {
+ // Same as above but with different size
+ fn: `
+ func $() {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+ copy(x[1:], x[:])
+ }
+ `,
+ neg: []string{"memmove"},
+ },
+ // Nil checks before calling interface methods
+ {
+ fn: `
+ type I interface {
+ foo000()
+ foo001()
+ foo002()
+ foo003()
+ foo004()
+ foo005()
+ foo006()
+ foo007()
+ foo008()
+ foo009()
+ foo010()
+ foo011()
+ foo012()
+ foo013()
+ foo014()
+ foo015()
+ foo016()
+ foo017()
+ foo018()
+ foo019()
+ foo020()
+ foo021()
+ foo022()
+ foo023()
+ foo024()
+ foo025()
+ foo026()
+ foo027()
+ foo028()
+ foo029()
+ foo030()
+ foo031()
+ foo032()
+ foo033()
+ foo034()
+ foo035()
+ foo036()
+ foo037()
+ foo038()
+ foo039()
+ foo040()
+ foo041()
+ foo042()
+ foo043()
+ foo044()
+ foo045()
+ foo046()
+ foo047()
+ foo048()
+ foo049()
+ foo050()
+ foo051()
+ foo052()
+ foo053()
+ foo054()
+ foo055()
+ foo056()
+ foo057()
+ foo058()
+ foo059()
+ foo060()
+ foo061()
+ foo062()
+ foo063()
+ foo064()
+ foo065()
+ foo066()
+ foo067()
+ foo068()
+ foo069()
+ foo070()
+ foo071()
+ foo072()
+ foo073()
+ foo074()
+ foo075()
+ foo076()
+ foo077()
+ foo078()
+ foo079()
+ foo080()
+ foo081()
+ foo082()
+ foo083()
+ foo084()
+ foo085()
+ foo086()
+ foo087()
+ foo088()
+ foo089()
+ foo090()
+ foo091()
+ foo092()
+ foo093()
+ foo094()
+ foo095()
+ foo096()
+ foo097()
+ foo098()
+ foo099()
+ foo100()
+ foo101()
+ foo102()
+ foo103()
+ foo104()
+ foo105()
+ foo106()
+ foo107()
+ foo108()
+ foo109()
+ foo110()
+ foo111()
+ foo112()
+ foo113()
+ foo114()
+ foo115()
+ foo116()
+ foo117()
+ foo118()
+ foo119()
+ foo120()
+ foo121()
+ foo122()
+ foo123()
+ foo124()
+ foo125()
+ foo126()
+ foo127()
+ foo128()
+ foo129()
+ foo130()
+ foo131()
+ foo132()
+ foo133()
+ foo134()
+ foo135()
+ foo136()
+ foo137()
+ foo138()
+ foo139()
+ foo140()
+ foo141()
+ foo142()
+ foo143()
+ foo144()
+ foo145()
+ foo146()
+ foo147()
+ foo148()
+ foo149()
+ foo150()
+ foo151()
+ foo152()
+ foo153()
+ foo154()
+ foo155()
+ foo156()
+ foo157()
+ foo158()
+ foo159()
+ foo160()
+ foo161()
+ foo162()
+ foo163()
+ foo164()
+ foo165()
+ foo166()
+ foo167()
+ foo168()
+ foo169()
+ foo170()
+ foo171()
+ foo172()
+ foo173()
+ foo174()
+ foo175()
+ foo176()
+ foo177()
+ foo178()
+ foo179()
+ foo180()
+ foo181()
+ foo182()
+ foo183()
+ foo184()
+ foo185()
+ foo186()
+ foo187()
+ foo188()
+ foo189()
+ foo190()
+ foo191()
+ foo192()
+ foo193()
+ foo194()
+ foo195()
+ foo196()
+ foo197()
+ foo198()
+ foo199()
+ foo200()
+ foo201()
+ foo202()
+ foo203()
+ foo204()
+ foo205()
+ foo206()
+ foo207()
+ foo208()
+ foo209()
+ foo210()
+ foo211()
+ foo212()
+ foo213()
+ foo214()
+ foo215()
+ foo216()
+ foo217()
+ foo218()
+ foo219()
+ foo220()
+ foo221()
+ foo222()
+ foo223()
+ foo224()
+ foo225()
+ foo226()
+ foo227()
+ foo228()
+ foo229()
+ foo230()
+ foo231()
+ foo232()
+ foo233()
+ foo234()
+ foo235()
+ foo236()
+ foo237()
+ foo238()
+ foo239()
+ foo240()
+ foo241()
+ foo242()
+ foo243()
+ foo244()
+ foo245()
+ foo246()
+ foo247()
+ foo248()
+ foo249()
+ foo250()
+ foo251()
+ foo252()
+ foo253()
+ foo254()
+ foo255()
+ foo256()
+ foo257()
+ foo258()
+ foo259()
+ foo260()
+ foo261()
+ foo262()
+ foo263()
+ foo264()
+ foo265()
+ foo266()
+ foo267()
+ foo268()
+ foo269()
+ foo270()
+ foo271()
+ foo272()
+ foo273()
+ foo274()
+ foo275()
+ foo276()
+ foo277()
+ foo278()
+ foo279()
+ foo280()
+ foo281()
+ foo282()
+ foo283()
+ foo284()
+ foo285()
+ foo286()
+ foo287()
+ foo288()
+ foo289()
+ foo290()
+ foo291()
+ foo292()
+ foo293()
+ foo294()
+ foo295()
+ foo296()
+ foo297()
+ foo298()
+ foo299()
+ foo300()
+ foo301()
+ foo302()
+ foo303()
+ foo304()
+ foo305()
+ foo306()
+ foo307()
+ foo308()
+ foo309()
+ foo310()
+ foo311()
+ foo312()
+ foo313()
+ foo314()
+ foo315()
+ foo316()
+ foo317()
+ foo318()
+ foo319()
+ foo320()
+ foo321()
+ foo322()
+ foo323()
+ foo324()
+ foo325()
+ foo326()
+ foo327()
+ foo328()
+ foo329()
+ foo330()
+ foo331()
+ foo332()
+ foo333()
+ foo334()
+ foo335()
+ foo336()
+ foo337()
+ foo338()
+ foo339()
+ foo340()
+ foo341()
+ foo342()
+ foo343()
+ foo344()
+ foo345()
+ foo346()
+ foo347()
+ foo348()
+ foo349()
+ foo350()
+ foo351()
+ foo352()
+ foo353()
+ foo354()
+ foo355()
+ foo356()
+ foo357()
+ foo358()
+ foo359()
+ foo360()
+ foo361()
+ foo362()
+ foo363()
+ foo364()
+ foo365()
+ foo366()
+ foo367()
+ foo368()
+ foo369()
+ foo370()
+ foo371()
+ foo372()
+ foo373()
+ foo374()
+ foo375()
+ foo376()
+ foo377()
+ foo378()
+ foo379()
+ foo380()
+ foo381()
+ foo382()
+ foo383()
+ foo384()
+ foo385()
+ foo386()
+ foo387()
+ foo388()
+ foo389()
+ foo390()
+ foo391()
+ foo392()
+ foo393()
+ foo394()
+ foo395()
+ foo396()
+ foo397()
+ foo398()
+ foo399()
+ foo400()
+ foo401()
+ foo402()
+ foo403()
+ foo404()
+ foo405()
+ foo406()
+ foo407()
+ foo408()
+ foo409()
+ foo410()
+ foo411()
+ foo412()
+ foo413()
+ foo414()
+ foo415()
+ foo416()
+ foo417()
+ foo418()
+ foo419()
+ foo420()
+ foo421()
+ foo422()
+ foo423()
+ foo424()
+ foo425()
+ foo426()
+ foo427()
+ foo428()
+ foo429()
+ foo430()
+ foo431()
+ foo432()
+ foo433()
+ foo434()
+ foo435()
+ foo436()
+ foo437()
+ foo438()
+ foo439()
+ foo440()
+ foo441()
+ foo442()
+ foo443()
+ foo444()
+ foo445()
+ foo446()
+ foo447()
+ foo448()
+ foo449()
+ foo450()
+ foo451()
+ foo452()
+ foo453()
+ foo454()
+ foo455()
+ foo456()
+ foo457()
+ foo458()
+ foo459()
+ foo460()
+ foo461()
+ foo462()
+ foo463()
+ foo464()
+ foo465()
+ foo466()
+ foo467()
+ foo468()
+ foo469()
+ foo470()
+ foo471()
+ foo472()
+ foo473()
+ foo474()
+ foo475()
+ foo476()
+ foo477()
+ foo478()
+ foo479()
+ foo480()
+ foo481()
+ foo482()
+ foo483()
+ foo484()
+ foo485()
+ foo486()
+ foo487()
+ foo488()
+ foo489()
+ foo490()
+ foo491()
+ foo492()
+ foo493()
+ foo494()
+ foo495()
+ foo496()
+ foo497()
+ foo498()
+ foo499()
+ foo500()
+ foo501()
+ foo502()
+ foo503()
+ foo504()
+ foo505()
+ foo506()
+ foo507()
+ foo508()
+ foo509()
+ foo510()
+ foo511()
+ }
+ func $(i I) {
+ i.foo511()
+ }
+ `,
+ pos: []string{"TESTB"},
+ },
+ {
+ fn: `
+ func $(i I) {
+ i.foo001()
+ }
+ `,
+ neg: []string{"TESTB"},
},
}
var linux386Tests = []*asmTest{
{
- `
+ fn: `
func f0(b []byte) uint32 {
return binary.LittleEndian.Uint32(b)
}
`,
- []string{"\tMOVL\t\\(.*\\),"},
+ pos: []string{"\tMOVL\t\\(.*\\),"},
},
{
- `
+ fn: `
func f1(b []byte, i int) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
`,
- []string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
+ pos: []string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
+ },
+
+ // multiplication by powers of two
+ {
+ fn: `
+ func $(n int) int {
+ return 32*n
+ }
+ `,
+ pos: []string{"SHLL"},
+ neg: []string{"IMULL"},
+ },
+ {
+ fn: `
+ func $(n int) int {
+ return -64*n
+ }
+ `,
+ pos: []string{"SHLL"},
+ neg: []string{"IMULL"},
+ },
+
+ // multiplication merging tests
+ {
+ fn: `
+ func $(n int) int {
+ return 9*n + 14*n
+ }`,
+ pos: []string{"\tIMULL\t[$]23"}, // 23*n
+ },
+ {
+ fn: `
+ func $(a, n int) int {
+ return 19*a + a*n
+ }`,
+ pos: []string{"\tADDL\t[$]19", "\tIMULL"}, // (n+19)*a
+ },
+ {
+ // check that stack store is optimized away
+ fn: `
+ func $() int {
+ var x int
+ return *(&x)
+ }
+ `,
+ pos: []string{"TEXT\t.*, [$]0-4"},
+ },
+ {
+ fn: `
+ func mul3(n int) int {
+ return 23*n - 9*n
+ }`,
+ pos: []string{"\tIMULL\t[$]14"}, // 14*n
+ },
+ {
+ fn: `
+ func mul4(a, n int) int {
+ return n*a - a*19
+ }`,
+ pos: []string{"\tADDL\t[$]-19", "\tIMULL"}, // (n-19)*a
+ },
+ // Check that len() and cap() div by a constant power of two
+ // are compiled into SHRL.
+ {
+ fn: `
+ func $(a []int) int {
+ return len(a) / 1024
+ }
+ `,
+ pos: []string{"\tSHRL\t\\$10,"},
+ },
+ {
+ fn: `
+ func $(s string) int {
+ return len(s) / (4097 >> 1)
+ }
+ `,
+ pos: []string{"\tSHRL\t\\$11,"},
+ },
+ {
+ fn: `
+ func $(a []int) int {
+ return cap(a) / ((1 << 11) + 2048)
+ }
+ `,
+ pos: []string{"\tSHRL\t\\$12,"},
+ },
+ // Check that len() and cap() mod by a constant power of two
+ // are compiled into ANDL.
+ {
+ fn: `
+ func $(a []int) int {
+ return len(a) % 1024
+ }
+ `,
+ pos: []string{"\tANDL\t\\$1023,"},
+ },
+ {
+ fn: `
+ func $(s string) int {
+ return len(s) % (4097 >> 1)
+ }
+ `,
+ pos: []string{"\tANDL\t\\$2047,"},
+ },
+ {
+ fn: `
+ func $(a []int) int {
+ return cap(a) % ((1 << 11) + 2048)
+ }
+ `,
+ pos: []string{"\tANDL\t\\$4095,"},
+ },
+ {
+ // Test that small memmove was replaced with direct movs
+ fn: `
+ func $() {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7}
+ copy(x[1:], x[:])
+ }
+ `,
+ neg: []string{"memmove"},
+ },
+ {
+ // Same as above but with different size
+ fn: `
+ func $() {
+ x := [...]byte{1, 2, 3, 4}
+ copy(x[1:], x[:])
+ }
+ `,
+ neg: []string{"memmove"},
},
}
var linuxS390XTests = []*asmTest{
{
- `
+ fn: `
func f0(b []byte) uint32 {
return binary.LittleEndian.Uint32(b)
}
`,
- []string{"\tMOVWBR\t\\(.*\\),"},
+ pos: []string{"\tMOVWBR\t\\(.*\\),"},
},
{
- `
+ fn: `
func f1(b []byte, i int) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
`,
- []string{"\tMOVWBR\t\\(.*\\)\\(.*\\*1\\),"},
+ pos: []string{"\tMOVWBR\t\\(.*\\)\\(.*\\*1\\),"},
},
{
- `
+ fn: `
func f2(b []byte) uint64 {
return binary.LittleEndian.Uint64(b)
}
`,
- []string{"\tMOVDBR\t\\(.*\\),"},
+ pos: []string{"\tMOVDBR\t\\(.*\\),"},
},
{
- `
+ fn: `
func f3(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
`,
- []string{"\tMOVDBR\t\\(.*\\)\\(.*\\*1\\),"},
+ pos: []string{"\tMOVDBR\t\\(.*\\)\\(.*\\*1\\),"},
},
{
- `
+ fn: `
func f4(b []byte) uint32 {
return binary.BigEndian.Uint32(b)
}
`,
- []string{"\tMOVWZ\t\\(.*\\),"},
+ pos: []string{"\tMOVWZ\t\\(.*\\),"},
},
{
- `
+ fn: `
func f5(b []byte, i int) uint32 {
return binary.BigEndian.Uint32(b[i:])
}
`,
- []string{"\tMOVWZ\t\\(.*\\)\\(.*\\*1\\),"},
+ pos: []string{"\tMOVWZ\t\\(.*\\)\\(.*\\*1\\),"},
},
{
- `
+ fn: `
func f6(b []byte) uint64 {
return binary.BigEndian.Uint64(b)
}
`,
- []string{"\tMOVD\t\\(.*\\),"},
+ pos: []string{"\tMOVD\t\\(.*\\),"},
},
{
- `
+ fn: `
func f7(b []byte, i int) uint64 {
return binary.BigEndian.Uint64(b[i:])
}
`,
- []string{"\tMOVD\t\\(.*\\)\\(.*\\*1\\),"},
+ pos: []string{"\tMOVD\t\\(.*\\)\\(.*\\*1\\),"},
},
{
- `
+ fn: `
func f8(x uint64) uint64 {
return x<<7 + x>>57
}
`,
- []string{"\tRLLG\t[$]7,"},
+ pos: []string{"\tRLLG\t[$]7,"},
},
{
- `
+ fn: `
func f9(x uint64) uint64 {
return x<<7 | x>>57
}
`,
- []string{"\tRLLG\t[$]7,"},
+ pos: []string{"\tRLLG\t[$]7,"},
},
{
- `
+ fn: `
func f10(x uint64) uint64 {
return x<<7 ^ x>>57
}
`,
- []string{"\tRLLG\t[$]7,"},
+ pos: []string{"\tRLLG\t[$]7,"},
},
{
- `
+ fn: `
func f11(x uint32) uint32 {
return x<<7 + x>>25
}
`,
- []string{"\tRLL\t[$]7,"},
+ pos: []string{"\tRLL\t[$]7,"},
},
{
- `
+ fn: `
func f12(x uint32) uint32 {
return x<<7 | x>>25
}
`,
- []string{"\tRLL\t[$]7,"},
+ pos: []string{"\tRLL\t[$]7,"},
},
{
- `
+ fn: `
func f13(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
- []string{"\tRLL\t[$]7,"},
+ pos: []string{"\tRLL\t[$]7,"},
},
// Fused multiply-add/sub instructions.
{
- `
+ fn: `
func f14(x, y, z float64) float64 {
return x * y + z
}
`,
- []string{"\tFMADD\t"},
+ pos: []string{"\tFMADD\t"},
},
{
- `
+ fn: `
func f15(x, y, z float64) float64 {
return x * y - z
}
`,
- []string{"\tFMSUB\t"},
+ pos: []string{"\tFMSUB\t"},
},
{
- `
+ fn: `
func f16(x, y, z float32) float32 {
return x * y + z
}
`,
- []string{"\tFMADDS\t"},
+ pos: []string{"\tFMADDS\t"},
},
{
- `
+ fn: `
func f17(x, y, z float32) float32 {
return x * y - z
}
`,
- []string{"\tFMSUBS\t"},
+ pos: []string{"\tFMSUBS\t"},
},
// Intrinsic tests for math/bits
{
- `
+ fn: `
func f18(a uint64) int {
return bits.TrailingZeros64(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f19(a uint32) int {
return bits.TrailingZeros32(a)
}
`,
- []string{"\tFLOGR\t", "\tMOVWZ\t"},
+ pos: []string{"\tFLOGR\t", "\tMOVWZ\t"},
},
{
- `
+ fn: `
func f20(a uint16) int {
return bits.TrailingZeros16(a)
}
`,
- []string{"\tFLOGR\t", "\tOR\t\\$65536,"},
+ pos: []string{"\tFLOGR\t", "\tOR\t\\$65536,"},
},
{
- `
+ fn: `
func f21(a uint8) int {
return bits.TrailingZeros8(a)
}
`,
- []string{"\tFLOGR\t", "\tOR\t\\$256,"},
+ pos: []string{"\tFLOGR\t", "\tOR\t\\$256,"},
},
// Intrinsic tests for math/bits
{
- `
+ fn: `
func f22(a uint64) uint64 {
return bits.ReverseBytes64(a)
}
`,
- []string{"\tMOVDBR\t"},
+ pos: []string{"\tMOVDBR\t"},
},
{
- `
+ fn: `
func f23(a uint32) uint32 {
return bits.ReverseBytes32(a)
}
`,
- []string{"\tMOVWBR\t"},
+ pos: []string{"\tMOVWBR\t"},
},
{
- `
+ fn: `
func f24(a uint64) int {
return bits.Len64(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f25(a uint32) int {
return bits.Len32(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f26(a uint16) int {
return bits.Len16(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f27(a uint8) int {
return bits.Len8(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f28(a uint) int {
return bits.Len(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f29(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f30(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f31(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f32(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
},
{
- `
+ fn: `
func f33(a uint) int {
return bits.LeadingZeros(a)
}
`,
- []string{"\tFLOGR\t"},
+ pos: []string{"\tFLOGR\t"},
+ },
+ // Intrinsic tests for math.
+ {
+ fn: `
+ func ceil(x float64) float64 {
+ return math.Ceil(x)
+ }
+ `,
+ pos: []string{"\tFIDBR\t[$]6"},
+ },
+ {
+ fn: `
+ func floor(x float64) float64 {
+ return math.Floor(x)
+ }
+ `,
+ pos: []string{"\tFIDBR\t[$]7"},
+ },
+ {
+ fn: `
+ func round(x float64) float64 {
+ return math.Round(x)
+ }
+ `,
+ pos: []string{"\tFIDBR\t[$]1"},
+ },
+ {
+ fn: `
+ func trunc(x float64) float64 {
+ return math.Trunc(x)
+ }
+ `,
+ pos: []string{"\tFIDBR\t[$]5"},
+ },
+ {
+ fn: `
+ func roundToEven(x float64) float64 {
+ return math.RoundToEven(x)
+ }
+ `,
+ pos: []string{"\tFIDBR\t[$]4"},
+ },
+ {
+ // check that stack store is optimized away
+ fn: `
+ func $() int {
+ var x int
+ return *(&x)
+ }
+ `,
+ pos: []string{"TEXT\t.*, [$]0-8"},
+ },
+ // Constant propagation through raw bits conversions.
+ {
+ // uint32 constant converted to float32 constant
+ fn: `
+ func $(x float32) float32 {
+ if x > math.Float32frombits(0x3f800000) {
+ return -x
+ }
+ return x
+ }
+ `,
+ pos: []string{"\tFMOVS\t[$]f32.3f800000\\(SB\\)"},
+ },
+ {
+ // float32 constant converted to uint32 constant
+ fn: `
+ func $(x uint32) uint32 {
+ if x > math.Float32bits(1) {
+ return -x
+ }
+ return x
+ }
+ `,
+ neg: []string{"\tFMOVS\t"},
+ },
+ // Constant propagation through float comparisons.
+ {
+ fn: `
+ func $() bool {
+ return 0.5 == float64(uint32(1)) ||
+ 1.5 > float64(uint64(1<<63)) ||
+ math.NaN() == math.NaN()
+ }
+ `,
+ pos: []string{"\tMOV(B|BZ|D)\t[$]0,"},
+ neg: []string{"\tFCMPU\t", "\tMOV(B|BZ|D)\t[$]1,"},
+ },
+ {
+ fn: `
+ func $() bool {
+ return float32(0.5) <= float32(int64(1)) &&
+ float32(1.5) >= float32(int32(-1<<31)) &&
+ float32(math.NaN()) != float32(math.NaN())
+ }
+ `,
+ pos: []string{"\tMOV(B|BZ|D)\t[$]1,"},
+ neg: []string{"\tCEBR\t", "\tMOV(B|BZ|D)\t[$]0,"},
+ },
+ // math tests
+ {
+ fn: `
+ func $(x float64) float64 {
+ return math.Abs(x)
+ }
+ `,
+ pos: []string{"\tLPDFR\t"},
+ neg: []string{"\tMOVD\t"}, // no integer loads/stores
+ },
+ {
+ fn: `
+ func $(x float32) float32 {
+ return float32(math.Abs(float64(x)))
+ }
+ `,
+ pos: []string{"\tLPDFR\t"},
+ neg: []string{"\tLDEBR\t", "\tLEDBR\t"}, // no float64 conversion
+ },
+ {
+ fn: `
+ func $(x float64) float64 {
+ return math.Float64frombits(math.Float64bits(x)|1<<63)
+ }
+ `,
+ pos: []string{"\tLNDFR\t"},
+ neg: []string{"\tMOVD\t"}, // no integer loads/stores
+ },
+ {
+ fn: `
+ func $(x float64) float64 {
+ return -math.Abs(x)
+ }
+ `,
+ pos: []string{"\tLNDFR\t"},
+ neg: []string{"\tMOVD\t"}, // no integer loads/stores
+ },
+ {
+ fn: `
+ func $(x, y float64) float64 {
+ return math.Copysign(x, y)
+ }
+ `,
+ pos: []string{"\tCPSDR\t"},
+ neg: []string{"\tMOVD\t"}, // no integer loads/stores
+ },
+ {
+ fn: `
+ func $(x float64) float64 {
+ return math.Copysign(x, -1)
+ }
+ `,
+ pos: []string{"\tLNDFR\t"},
+ neg: []string{"\tMOVD\t"}, // no integer loads/stores
+ },
+ {
+ fn: `
+ func $(x float64) float64 {
+ return math.Copysign(-1, x)
+ }
+ `,
+ pos: []string{"\tCPSDR\t"},
+ neg: []string{"\tMOVD\t"}, // no integer loads/stores
},
}
var linuxARMTests = []*asmTest{
+ // multiplication by powers of two
{
- `
+ fn: `
+ func $(n int) int {
+ return 16*n
+ }
+ `,
+ pos: []string{"\tSLL\t[$]4"},
+ neg: []string{"\tMUL\t"},
+ },
+ {
+ fn: `
+ func $(n int) int {
+ return -32*n
+ }
+ `,
+ pos: []string{"\tSLL\t[$]5"},
+ neg: []string{"\tMUL\t"},
+ },
+
+ {
+ fn: `
func f0(x uint32) uint32 {
return x<<7 + x>>25
}
`,
- []string{"\tMOVW\tR[0-9]+@>25,"},
+ pos: []string{"\tMOVW\tR[0-9]+@>25,"},
},
{
- `
+ fn: `
func f1(x uint32) uint32 {
return x<<7 | x>>25
}
`,
- []string{"\tMOVW\tR[0-9]+@>25,"},
+ pos: []string{"\tMOVW\tR[0-9]+@>25,"},
},
{
- `
+ fn: `
func f2(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
- []string{"\tMOVW\tR[0-9]+@>25,"},
+ pos: []string{"\tMOVW\tR[0-9]+@>25,"},
},
{
- `
+ fn: `
func f3(a uint64) int {
return bits.Len64(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f4(a uint32) int {
return bits.Len32(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f5(a uint16) int {
return bits.Len16(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f6(a uint8) int {
return bits.Len8(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f7(a uint) int {
return bits.Len(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f8(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f9(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f10(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f11(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f12(a uint) int {
return bits.LeadingZeros(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
// make sure assembly output has matching offset and base register.
- `
+ fn: `
func f13(a, b int) int {
- var x [16]byte // use some frame
- _ = x
+ runtime.GC() // use some frame
return b
}
`,
- []string{"b\\+4\\(FP\\)"},
+ pos: []string{"b\\+4\\(FP\\)"},
+ },
+ {
+ // check that stack store is optimized away
+ fn: `
+ func $() int {
+ var x int
+ return *(&x)
+ }
+ `,
+ pos: []string{"TEXT\t.*, [$]-4-4"},
},
}
var linuxARM64Tests = []*asmTest{
+ // multiplication by powers of two
{
- `
+ fn: `
+ func $(n int) int {
+ return 64*n
+ }
+ `,
+ pos: []string{"\tLSL\t[$]6"},
+ neg: []string{"\tMUL\t"},
+ },
+ {
+ fn: `
+ func $(n int) int {
+ return -128*n
+ }
+ `,
+ pos: []string{"\tLSL\t[$]7"},
+ neg: []string{"\tMUL\t"},
+ },
+
+ {
+ fn: `
func f0(x uint64) uint64 {
return x<<7 + x>>57
}
`,
- []string{"\tROR\t[$]57,"},
+ pos: []string{"\tROR\t[$]57,"},
},
{
- `
+ fn: `
func f1(x uint64) uint64 {
return x<<7 | x>>57
}
`,
- []string{"\tROR\t[$]57,"},
+ pos: []string{"\tROR\t[$]57,"},
},
{
- `
+ fn: `
func f2(x uint64) uint64 {
return x<<7 ^ x>>57
}
`,
- []string{"\tROR\t[$]57,"},
+ pos: []string{"\tROR\t[$]57,"},
},
{
- `
+ fn: `
func f3(x uint32) uint32 {
return x<<7 + x>>25
}
`,
- []string{"\tRORW\t[$]25,"},
+ pos: []string{"\tRORW\t[$]25,"},
},
{
- `
+ fn: `
func f4(x uint32) uint32 {
return x<<7 | x>>25
}
`,
- []string{"\tRORW\t[$]25,"},
+ pos: []string{"\tRORW\t[$]25,"},
},
{
- `
+ fn: `
func f5(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
- []string{"\tRORW\t[$]25,"},
+ pos: []string{"\tRORW\t[$]25,"},
},
{
- `
+ fn: `
func f22(a uint64) uint64 {
return bits.ReverseBytes64(a)
}
`,
- []string{"\tREV\t"},
+ pos: []string{"\tREV\t"},
},
{
- `
+ fn: `
func f23(a uint32) uint32 {
return bits.ReverseBytes32(a)
}
`,
- []string{"\tREVW\t"},
+ pos: []string{"\tREVW\t"},
},
{
- `
+ fn: `
func f24(a uint64) int {
return bits.Len64(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f25(a uint32) int {
return bits.Len32(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f26(a uint16) int {
return bits.Len16(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f27(a uint8) int {
return bits.Len8(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f28(a uint) int {
return bits.Len(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f29(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f30(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f31(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f32(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f33(a uint) int {
return bits.LeadingZeros(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f34(a uint64) uint64 {
return a & ((1<<63)-1)
}
`,
- []string{"\tAND\t"},
+ pos: []string{"\tAND\t"},
},
{
- `
+ fn: `
func f35(a uint64) uint64 {
return a & (1<<63)
}
`,
- []string{"\tAND\t"},
+ pos: []string{"\tAND\t"},
},
{
// make sure offsets are folded into load and store.
- `
+ fn: `
func f36(_, a [20]byte) (b [20]byte) {
b = a
return
}
`,
- []string{"\tMOVD\t\"\"\\.a\\+[0-9]+\\(FP\\), R[0-9]+", "\tMOVD\tR[0-9]+, \"\"\\.b\\+[0-9]+\\(FP\\)"},
+ pos: []string{"\tMOVD\t\"\"\\.a\\+[0-9]+\\(FP\\), R[0-9]+", "\tMOVD\tR[0-9]+, \"\"\\.b\\+[0-9]+\\(FP\\)"},
+ },
+ {
+ // check that stack store is optimized away
+ fn: `
+ func $() int {
+ var x int
+ return *(&x)
+ }
+ `,
+ pos: []string{"TEXT\t.*, [$]-8-8"},
+ },
+ {
+ // check that we don't emit comparisons for constant shift
+ fn: `
+//go:nosplit
+ func $(x int) int {
+ return x << 17
+ }
+ `,
+ pos: []string{"LSL\t\\$17"},
+ neg: []string{"CMP"},
},
}
var linuxMIPSTests = []*asmTest{
{
- `
+ fn: `
func f0(a uint64) int {
return bits.Len64(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f1(a uint32) int {
return bits.Len32(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f2(a uint16) int {
return bits.Len16(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f3(a uint8) int {
return bits.Len8(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f4(a uint) int {
return bits.Len(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f5(a uint64) int {
return bits.LeadingZeros64(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f6(a uint32) int {
return bits.LeadingZeros32(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f7(a uint16) int {
return bits.LeadingZeros16(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f8(a uint8) int {
return bits.LeadingZeros8(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
},
{
- `
+ fn: `
func f9(a uint) int {
return bits.LeadingZeros(a)
}
`,
- []string{"\tCLZ\t"},
+ pos: []string{"\tCLZ\t"},
+ },
+ {
+ // check that stack store is optimized away
+ fn: `
+ func $() int {
+ var x int
+ return *(&x)
+ }
+ `,
+ pos: []string{"TEXT\t.*, [$]-4-4"},
+ },
+}
+
+var linuxMIPS64Tests = []*asmTest{
+ {
+ // check that we don't emit comparisons for constant shift
+ fn: `
+ func $(x int) int {
+ return x << 17
+ }
+ `,
+ pos: []string{"SLLV\t\\$17"},
+ neg: []string{"SGT"},
},
}
var linuxPPC64LETests = []*asmTest{
// Fused multiply-add/sub instructions.
{
- `
+ fn: `
func f0(x, y, z float64) float64 {
return x * y + z
}
`,
- []string{"\tFMADD\t"},
+ pos: []string{"\tFMADD\t"},
},
{
- `
+ fn: `
func f1(x, y, z float64) float64 {
return x * y - z
}
`,
- []string{"\tFMSUB\t"},
+ pos: []string{"\tFMSUB\t"},
},
{
- `
+ fn: `
func f2(x, y, z float32) float32 {
return x * y + z
}
`,
- []string{"\tFMADDS\t"},
+ pos: []string{"\tFMADDS\t"},
},
{
- `
+ fn: `
func f3(x, y, z float32) float32 {
return x * y - z
}
`,
- []string{"\tFMSUBS\t"},
+ pos: []string{"\tFMSUBS\t"},
},
{
- `
+ fn: `
func f4(x uint32) uint32 {
return x<<7 | x>>25
}
`,
- []string{"\tROTLW\t"},
+ pos: []string{"\tROTLW\t"},
},
{
- `
+ fn: `
func f5(x uint32) uint32 {
return x<<7 + x>>25
}
`,
- []string{"\tROTLW\t"},
+ pos: []string{"\tROTLW\t"},
},
{
- `
+ fn: `
func f6(x uint32) uint32 {
return x<<7 ^ x>>25
}
`,
- []string{"\tROTLW\t"},
+ pos: []string{"\tROTLW\t"},
},
{
- `
+ fn: `
func f7(x uint64) uint64 {
return x<<7 | x>>57
}
`,
- []string{"\tROTL\t"},
+ pos: []string{"\tROTL\t"},
},
{
- `
+ fn: `
func f8(x uint64) uint64 {
return x<<7 + x>>57
}
`,
- []string{"\tROTL\t"},
+ pos: []string{"\tROTL\t"},
},
{
- `
+ fn: `
func f9(x uint64) uint64 {
return x<<7 ^ x>>57
}
`,
- []string{"\tROTL\t"},
+ pos: []string{"\tROTL\t"},
+ },
+ {
+ fn: `
+ func f10(a uint32) uint32 {
+ return bits.RotateLeft32(a, 9)
+ }
+ `,
+ pos: []string{"\tROTLW\t"},
+ },
+ {
+ fn: `
+ func f11(a uint64) uint64 {
+ return bits.RotateLeft64(a, 37)
+ }
+ `,
+ pos: []string{"\tROTL\t"},
+ },
+
+ {
+ fn: `
+ func f12(a, b float64) float64 {
+ return math.Copysign(a, b)
+ }
+ `,
+ pos: []string{"\tFCPSGN\t"},
+ },
+
+ {
+ fn: `
+ func f13(a float64) float64 {
+ return math.Abs(a)
+ }
+ `,
+ pos: []string{"\tFABS\t"},
+ },
+
+ {
+ fn: `
+ func f14(b []byte) uint16 {
+ return binary.LittleEndian.Uint16(b)
+ }
+ `,
+ pos: []string{"\tMOVHZ\t"},
+ },
+ {
+ fn: `
+ func f15(b []byte) uint32 {
+ return binary.LittleEndian.Uint32(b)
+ }
+ `,
+ pos: []string{"\tMOVWZ\t"},
+ },
+
+ {
+ fn: `
+ func f16(b []byte) uint64 {
+ return binary.LittleEndian.Uint64(b)
+ }
+ `,
+ pos: []string{"\tMOVD\t"},
+ neg: []string{"MOVBZ", "MOVHZ", "MOVWZ"},
+ },
+
+ {
+ fn: `
+ func f17(b []byte, v uint16) {
+ binary.LittleEndian.PutUint16(b, v)
+ }
+ `,
+ pos: []string{"\tMOVH\t"},
+ },
+
+ {
+ fn: `
+ func f18(b []byte, v uint32) {
+ binary.LittleEndian.PutUint32(b, v)
+ }
+ `,
+ pos: []string{"\tMOVW\t"},
+ },
+
+ {
+ fn: `
+ func f19(b []byte, v uint64) {
+ binary.LittleEndian.PutUint64(b, v)
+ }
+ `,
+ pos: []string{"\tMOVD\t"},
+ neg: []string{"MOVB", "MOVH", "MOVW"},
+ },
+
+ {
+ // check that stack store is optimized away
+ fn: `
+ func $() int {
+ var x int
+ return *(&x)
+ }
+ `,
+ pos: []string{"TEXT\t.*, [$]0-8"},
+ },
+ // Constant propagation through raw bits conversions.
+ {
+ // uint32 constant converted to float32 constant
+ fn: `
+ func $(x float32) float32 {
+ if x > math.Float32frombits(0x3f800000) {
+ return -x
+ }
+ return x
+ }
+ `,
+ pos: []string{"\tFMOVS\t[$]f32.3f800000\\(SB\\)"},
+ },
+ {
+ // float32 constant converted to uint32 constant
+ fn: `
+ func $(x uint32) uint32 {
+ if x > math.Float32bits(1) {
+ return -x
+ }
+ return x
+ }
+ `,
+ neg: []string{"\tFMOVS\t"},
+ },
+}
+
+var plan9AMD64Tests = []*asmTest{
+ // We should make sure that the compiler doesn't generate floating point
+ // instructions for non-float operations on Plan 9, because floating point
+ // operations are not allowed in the note handler.
+ // Array zeroing.
+ {
+ fn: `
+ func $() [16]byte {
+ var a [16]byte
+ return a
+ }
+ `,
+ pos: []string{"\tMOVQ\t\\$0, \"\""},
+ },
+ // Array copy.
+ {
+ fn: `
+ func $(a [16]byte) (b [16]byte) {
+ b = a
+ return
+ }
+ `,
+ pos: []string{"\tMOVQ\t\"\"\\.a\\+[0-9]+\\(SP\\), (AX|CX)", "\tMOVQ\t(AX|CX), \"\"\\.b\\+[0-9]+\\(SP\\)"},
},
}
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
index 3ac81367048..52ee4defc2a 100644
--- a/src/cmd/compile/internal/gc/bexport.go
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -174,6 +174,8 @@ type exporter struct {
typIndex map[*types.Type]int
funcList []*Func
+ marked map[*types.Type]bool // types already seen by markType
+
// position encoding
posInfoFormat bool
prevFile string
@@ -230,6 +232,23 @@ func export(out *bufio.Writer, trace bool) int {
p.tracef("\n")
}
+ // Mark all inlineable functions that the importer could call.
+ // This is done by tracking down all inlineable methods
+ // reachable from exported types.
+ p.marked = make(map[*types.Type]bool)
+ for _, n := range exportlist {
+ sym := n.Sym
+ if sym.Exported() {
+ // Closures are added to exportlist, but with Exported
+ // already set. The export code below skips over them, so
+ // we have to here as well.
+ // TODO(mdempsky): Investigate why. This seems suspicious.
+ continue
+ }
+ p.markType(asNode(sym.Def).Type)
+ }
+ p.marked = nil
+
// export objects
//
// First, export all exported (package-level) objects; i.e., all objects
@@ -377,6 +396,7 @@ func export(out *bufio.Writer, trace bool) int {
p.tracef("\n----\nfunc { %#v }\n", f.Inl)
}
p.int(i)
+ p.int(int(f.InlCost))
p.stmtList(f.Inl)
if p.trace {
p.tracef("\n")
@@ -435,6 +455,72 @@ func unidealType(typ *types.Type, val Val) *types.Type {
return typ
}
+// markType recursively visits types reachable from t to identify
+// functions whose inline bodies may be needed.
+func (p *exporter) markType(t *types.Type) {
+ if p.marked[t] {
+ return
+ }
+ p.marked[t] = true
+
+ // If this is a named type, mark all of its associated
+ // methods. Skip interface types because t.Methods contains
+ // only their unexpanded method set (i.e., exclusive of
+ // interface embeddings), and the switch statement below
+ // handles their full method set.
+ if t.Sym != nil && t.Etype != TINTER {
+ for _, m := range t.Methods().Slice() {
+ if exportname(m.Sym.Name) {
+ p.markType(m.Type)
+ }
+ }
+ }
+
+ // Recursively mark any types that can be produced given a
+ // value of type t: dereferencing a pointer; indexing an
+ // array, slice, or map; receiving from a channel; accessing a
+ // struct field or interface method; or calling a function.
+ //
+ // Notably, we don't mark map key or function parameter types,
+ // because the user already needs some way to construct values
+ // of those types.
+ //
+ // It's not critical for correctness that this algorithm is
+ // perfect. Worst case, we might miss opportunities to inline
+ // some function calls in downstream packages.
+ switch t.Etype {
+ case TPTR32, TPTR64, TARRAY, TSLICE, TCHAN:
+ p.markType(t.Elem())
+
+ case TMAP:
+ p.markType(t.Val())
+
+ case TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if exportname(f.Sym.Name) || f.Embedded != 0 {
+ p.markType(f.Type)
+ }
+ }
+
+ case TFUNC:
+ // If t is the type of a function or method, then
+ // t.Nname() is its ONAME. Mark its inline body and
+ // any recursively called functions for export.
+ inlFlood(asNode(t.Nname()))
+
+ for _, f := range t.Results().FieldSlice() {
+ p.markType(f.Type)
+ }
+
+ case TINTER:
+ for _, f := range t.FieldSlice() {
+ if exportname(f.Sym.Name) {
+ p.markType(f.Type)
+ }
+ }
+ }
+}
+
func (p *exporter) obj(sym *types.Sym) {
// Exported objects may be from different packages because they
// may be re-exported via an exported alias or as dependencies in
@@ -504,7 +590,7 @@ func (p *exporter) obj(sym *types.Sym) {
p.paramList(sig.Results(), inlineable)
var f *Func
- if inlineable {
+ if inlineable && asNode(sym.Def).Func.ExportInline() {
f = asNode(sym.Def).Func
// TODO(gri) re-examine reexportdeplist:
// Because we can trivially export types
@@ -590,10 +676,28 @@ func fileLine(n *Node) (file string, line int) {
}
func isInlineable(n *Node) bool {
- if exportInlined && n != nil && n.Func != nil && n.Func.Inl.Len() != 0 {
- // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
- // currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
- if Debug['l'] < 2 {
+ if exportInlined && n != nil && n.Func != nil {
+ // When lazily typechecking inlined bodies, some
+ // re-exported ones may not have been typechecked yet.
+ // Currently that can leave unresolved ONONAMEs in
+ // import-dot-ed packages in the wrong package.
+ //
+ // TODO(mdempsky): Having the ExportInline check here
+ // instead of the outer if statement means we end up
+ // exporting parameter names even for functions whose
+ // inline body won't be exported by this package. This
+ // is currently necessary because we might first
+ // import a function/method from a package where it
+ // doesn't need to be re-exported, and then from a
+ // package where it does. If this happens, we'll need
+ // the parameter names.
+ //
+ // We could initially do without the parameter names,
+ // and then fill them in when importing the inline
+ // body. But parameter names are attached to the
+ // function type, and modifying types after the fact
+ // is a little sketchy.
+ if Debug_typecheckinl == 0 && n.Func.ExportInline() {
typecheckinl(n)
}
return true
@@ -601,8 +705,6 @@ func isInlineable(n *Node) bool {
return false
}
-var errorInterface *types.Type // lazily initialized
-
func (p *exporter) typ(t *types.Type) {
if t == nil {
Fatalf("exporter: nil type")
@@ -654,19 +756,7 @@ func (p *exporter) typ(t *types.Type) {
p.qualifiedName(tsym)
// write underlying type
- orig := t.Orig
- if orig == types.Errortype {
- // The error type is the only predeclared type which has
- // a composite underlying type. When we encode that type,
- // make sure to encode the underlying interface rather than
- // the named type again. See also the comment in universe.go
- // regarding the errortype and issue #15920.
- if errorInterface == nil {
- errorInterface = makeErrorInterface()
- }
- orig = errorInterface
- }
- p.typ(orig)
+ p.typ(t.Orig)
// interfaces don't have associated methods
if t.Orig.IsInterface() {
@@ -677,9 +767,7 @@ func (p *exporter) typ(t *types.Type) {
// TODO(gri) Determine if they are already sorted
// in which case we can drop this step.
var methods []*types.Field
- for _, m := range t.Methods().Slice() {
- methods = append(methods, m)
- }
+ methods = append(methods, t.Methods().Slice()...)
sort.Sort(methodbyname(methods))
p.int(len(methods))
@@ -708,7 +796,7 @@ func (p *exporter) typ(t *types.Type) {
p.bool(m.Nointerface()) // record go:nointerface pragma value (see also #16243)
var f *Func
- if inlineable {
+ if inlineable && mfn.Func.ExportInline() {
f = mfn.Func
reexportdeplist(mfn.Func.Inl)
}
@@ -968,18 +1056,17 @@ func parName(f *types.Field, numbered bool) string {
// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
// ~r%d is a (formerly) unnamed result.
if asNode(f.Nname) != nil {
- if asNode(f.Nname).Orig != nil {
- s = asNode(f.Nname).Orig.Sym
- if s != nil && s.Name[0] == '~' {
- if s.Name[1] == 'r' { // originally an unnamed result
- return "" // s = nil
- } else if s.Name[1] == 'b' { // originally the blank identifier _
- return "_" // belongs to localpkg
- }
- }
- } else {
+ if asNode(f.Nname).Orig == nil {
return "" // s = nil
}
+ s = asNode(f.Nname).Orig.Sym
+ if s != nil && s.Name[0] == '~' {
+ if s.Name[1] == 'r' { // originally an unnamed result
+ return "" // s = nil
+ } else if s.Name[1] == 'b' { // originally the blank identifier _
+ return "_" // belongs to localpkg
+ }
+ }
}
if s == nil {
@@ -1204,26 +1291,10 @@ func (p *exporter) expr(n *Node) {
p.value(n.Val())
case ONAME:
- // Special case: name used as local variable in export.
- // _ becomes ~b%d internally; print as _ for export
- if n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
- p.op(ONAME)
- p.pos(n)
- p.string("_") // inlined and customized version of p.sym(n)
- break
- }
-
- if n.Sym != nil && !isblank(n) && n.Name.Vargen > 0 {
- p.op(ONAME)
- p.pos(n)
- p.sym(n)
- break
- }
-
// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
// but for export, this should be rendered as (*pkg.T).meth.
// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
- if n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
+ if n.isMethodExpression() {
p.op(OXDOT)
p.pos(n)
p.expr(n.Left) // n.Left.Op == OTYPE
@@ -1241,11 +1312,7 @@ func (p *exporter) expr(n *Node) {
case OTYPE:
p.op(OTYPE)
p.pos(n)
- if p.bool(n.Type == nil) {
- p.sym(n)
- } else {
- p.typ(n.Type)
- }
+ p.typ(n.Type)
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// should have been resolved by typechecking - handled by default case
@@ -1345,7 +1412,7 @@ func (p *exporter) expr(n *Node) {
if op == OAPPEND {
p.bool(n.Isddd())
} else if n.Isddd() {
- Fatalf("exporter: unexpected '...' with %s call", opnames[op])
+ Fatalf("exporter: unexpected '...' with %v call", op)
}
case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
@@ -1520,8 +1587,8 @@ func (p *exporter) stmt(n *Node) {
p.stmtList(n.List)
p.stmtList(n.Nbody)
- case OFALL, OXFALL:
- p.op(OXFALL)
+ case OFALL:
+ p.op(OFALL)
p.pos(n)
case OBREAK, OCONTINUE:
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
index 29629620898..71d20ec37bd 100644
--- a/src/cmd/compile/internal/gc/bimport.go
+++ b/src/cmd/compile/internal/gc/bimport.go
@@ -187,7 +187,8 @@ func Import(imp *types.Pkg, in *bufio.Reader) {
// them only for functions with inlineable bodies. funchdr does
// parameter renaming which doesn't matter if we don't have a body.
- if f := p.funcList[i]; f != nil {
+ inlCost := p.int()
+ if f := p.funcList[i]; f != nil && f.Func.Inl.Len() == 0 {
// function not yet imported - read body and set it
funchdr(f)
body := p.stmtList()
@@ -200,7 +201,15 @@ func Import(imp *types.Pkg, in *bufio.Reader) {
body = []*Node{nod(OEMPTY, nil, nil)}
}
f.Func.Inl.Set(body)
- funcbody(f)
+ f.Func.InlCost = int32(inlCost)
+ if Debug['E'] > 0 && Debug['m'] > 2 && f.Func.Inl.Len() != 0 {
+ if Debug['m'] > 3 {
+ fmt.Printf("inl body for %v: %+v\n", f, f.Func.Inl)
+ } else {
+ fmt.Printf("inl body for %v: %v\n", f, f.Func.Inl)
+ }
+ }
+ funcbody()
} else {
// function already imported - read body but discard declarations
dclcontext = PDISCARD // throw away any declarations
@@ -326,55 +335,59 @@ func idealType(typ *types.Type) *types.Type {
func (p *importer) obj(tag int) {
switch tag {
case constTag:
- p.pos()
+ pos := p.pos()
sym := p.qualifiedName()
typ := p.typ()
val := p.value(typ)
- importconst(p.imp, sym, idealType(typ), nodlit(val))
+ importconst(p.imp, sym, idealType(typ), npos(pos, nodlit(val)))
case aliasTag:
- p.pos()
+ pos := p.pos()
sym := p.qualifiedName()
typ := p.typ()
- importalias(p.imp, sym, typ)
+ importalias(pos, p.imp, sym, typ)
case typeTag:
p.typ()
case varTag:
- p.pos()
+ pos := p.pos()
sym := p.qualifiedName()
typ := p.typ()
- importvar(p.imp, sym, typ)
+ importvar(pos, p.imp, sym, typ)
case funcTag:
- p.pos()
+ pos := p.pos()
sym := p.qualifiedName()
params := p.paramList()
result := p.paramList()
sig := functypefield(nil, params, result)
importsym(p.imp, sym, ONAME)
- if asNode(sym.Def) != nil && asNode(sym.Def).Op == ONAME {
+ if old := asNode(sym.Def); old != nil && old.Op == ONAME {
// function was imported before (via another import)
- if !eqtype(sig, asNode(sym.Def).Type) {
- p.formatErrorf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, asNode(sym.Def).Type, sig)
+ if !eqtype(sig, old.Type) {
+ p.formatErrorf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, old.Type, sig)
}
- p.funcList = append(p.funcList, nil)
+ n := asNode(old.Type.Nname())
+ p.funcList = append(p.funcList, n)
break
}
- n := newfuncname(sym)
+ n := newfuncnamel(pos, sym)
n.Type = sig
+ // TODO(mdempsky): Stop clobbering n.Pos in declare.
+ savedlineno := lineno
+ lineno = pos
declare(n, PFUNC)
+ lineno = savedlineno
p.funcList = append(p.funcList, n)
importlist = append(importlist, n)
+ sig.SetNname(asTypesNode(n))
+
if Debug['E'] > 0 {
fmt.Printf("import [%q] func %v \n", p.imp.Path, n)
- if Debug['m'] > 2 && n.Func.Inl.Len() != 0 {
- fmt.Printf("inl body: %v\n", n.Func.Inl)
- }
}
default:
@@ -479,15 +492,20 @@ func (p *importer) typ() *types.Type {
var t *types.Type
switch i {
case namedTag:
- p.pos()
+ pos := p.pos()
tsym := p.qualifiedName()
- t = pkgtype(p.imp, tsym)
+ t = pkgtype(pos, p.imp, tsym)
p.typList = append(p.typList, t)
+ dup := !t.IsKind(types.TFORW) // type already imported
// read underlying type
t0 := p.typ()
+ // TODO(mdempsky): Stop clobbering n.Pos in declare.
+ savedlineno := lineno
+ lineno = pos
p.importtype(t, t0)
+ lineno = savedlineno
// interfaces don't have associated methods
if t0.IsInterface() {
@@ -501,7 +519,7 @@ func (p *importer) typ() *types.Type {
// read associated methods
for i := p.int(); i > 0; i-- {
- p.pos()
+ mpos := p.pos()
sym := p.fieldSym()
// during import unexported method names should be in the type's package
@@ -514,10 +532,21 @@ func (p *importer) typ() *types.Type {
result := p.paramList()
nointerface := p.bool()
- n := newfuncname(methodname(sym, recv[0].Type))
- n.Type = functypefield(recv[0], params, result)
+ mt := functypefield(recv[0], params, result)
+ oldm := addmethod(sym, mt, false, nointerface)
+
+ if dup {
+ // An earlier import already declared this type and its methods.
+ // Discard the duplicate method declaration.
+ n := asNode(oldm.Type.Nname())
+ p.funcList = append(p.funcList, n)
+ continue
+ }
+
+ n := newfuncnamel(mpos, methodname(sym, recv[0].Type))
+ n.Type = mt
+ n.SetClass(PFUNC)
checkwidth(n.Type)
- addmethod(sym, n.Type, false, nointerface)
p.funcList = append(p.funcList, n)
importlist = append(importlist, n)
@@ -526,13 +555,10 @@ func (p *importer) typ() *types.Type {
// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
// out by typecheck's lookdot as this $$.ttype. So by providing
// this back link here we avoid special casing there.
- n.Type.FuncType().Nname = asTypesNode(n)
+ mt.SetNname(asTypesNode(n))
if Debug['E'] > 0 {
fmt.Printf("import [%q] meth %v \n", p.imp.Path, n)
- if Debug['m'] > 2 && n.Func.Inl.Len() != 0 {
- fmt.Printf("inl body: %v\n", n.Func.Inl)
- }
}
}
@@ -616,7 +642,7 @@ func (p *importer) fieldList() (fields []*types.Field) {
}
func (p *importer) field() *types.Field {
- p.pos()
+ pos := p.pos()
sym, alias := p.fieldName()
typ := p.typ()
note := p.string()
@@ -636,7 +662,7 @@ func (p *importer) field() *types.Field {
}
f.Sym = sym
- f.Nname = asTypesNode(newname(sym))
+ f.Nname = asTypesNode(newnamel(pos, sym))
f.Type = typ
f.Note = note
@@ -660,14 +686,14 @@ func (p *importer) methodList() (methods []*types.Field) {
}
func (p *importer) method() *types.Field {
- p.pos()
+ pos := p.pos()
sym := p.methodName()
params := p.paramList()
result := p.paramList()
f := types.NewField()
f.Sym = sym
- f.Nname = asTypesNode(newname(sym))
+ f.Nname = asTypesNode(newnamel(pos, sym))
f.Type = functypefield(fakeRecvField(), params, result)
return f
}
@@ -922,10 +948,10 @@ func (p *importer) node() *Node {
// again. Re-introduce explicit uintptr(c) conversion.
// (issue 16317).
if typ.IsUnsafePtr() {
- n = nod(OCONV, n, nil)
+ n = nodl(pos, OCONV, n, nil)
n.Type = types.Types[TUINTPTR]
}
- n = nod(OCONV, n, nil)
+ n = nodl(pos, OCONV, n, nil)
n.Type = typ
}
return n
@@ -937,11 +963,7 @@ func (p *importer) node() *Node {
// unreachable - should have been resolved by typechecking
case OTYPE:
- pos := p.pos()
- if p.bool() {
- return npos(pos, mkname(p.sym()))
- }
- return npos(pos, typenod(p.typ()))
+ return npos(p.pos(), typenod(p.typ()))
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// unreachable - should have been resolved by typechecking
@@ -950,21 +972,26 @@ func (p *importer) node() *Node {
// unimplemented
case OPTRLIT:
- n := npos(p.pos(), p.expr())
+ pos := p.pos()
+ n := npos(pos, p.expr())
if !p.bool() /* !implicit, i.e. '&' operator */ {
if n.Op == OCOMPLIT {
// Special case for &T{...}: turn into (*T){...}.
- n.Right = nod(OIND, n.Right, nil)
+ n.Right = nodl(pos, OIND, n.Right, nil)
n.Right.SetImplicit(true)
} else {
- n = nod(OADDR, n, nil)
+ n = nodl(pos, OADDR, n, nil)
}
}
return n
case OSTRUCTLIT:
- n := nodl(p.pos(), OCOMPLIT, nil, typenod(p.typ()))
+ // TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
+ savedlineno := lineno
+ lineno = p.pos()
+ n := nodl(lineno, OCOMPLIT, nil, typenod(p.typ()))
n.List.Set(p.elemList()) // special handling of field names
+ lineno = savedlineno
return n
// case OARRAYLIT, OSLICELIT, OMAPLIT:
@@ -1128,62 +1155,50 @@ func (p *importer) node() *Node {
return nodl(p.pos(), op, p.expr(), nil)
case OIF:
- types.Markdcl()
n := nodl(p.pos(), OIF, nil, nil)
n.Ninit.Set(p.stmtList())
n.Left = p.expr()
n.Nbody.Set(p.stmtList())
n.Rlist.Set(p.stmtList())
- types.Popdcl()
return n
case OFOR:
- types.Markdcl()
n := nodl(p.pos(), OFOR, nil, nil)
n.Ninit.Set(p.stmtList())
n.Left, n.Right = p.exprsOrNil()
n.Nbody.Set(p.stmtList())
- types.Popdcl()
return n
case ORANGE:
- types.Markdcl()
n := nodl(p.pos(), ORANGE, nil, nil)
n.List.Set(p.stmtList())
n.Right = p.expr()
n.Nbody.Set(p.stmtList())
- types.Popdcl()
return n
case OSELECT, OSWITCH:
- types.Markdcl()
n := nodl(p.pos(), op, nil, nil)
n.Ninit.Set(p.stmtList())
n.Left, _ = p.exprsOrNil()
n.List.Set(p.stmtList())
- types.Popdcl()
return n
// case OCASE, OXCASE:
// unreachable - mapped to OXCASE case below by exporter
case OXCASE:
- types.Markdcl()
n := nodl(p.pos(), OXCASE, nil, nil)
- n.Xoffset = int64(types.Block)
n.List.Set(p.exprList())
// TODO(gri) eventually we must declare variables for type switch
// statements (type switch statements are not yet exported)
n.Nbody.Set(p.stmtList())
- types.Popdcl()
return n
// case OFALL:
// unreachable - mapped to OXFALL case below by exporter
- case OXFALL:
- n := nodl(p.pos(), OXFALL, nil, nil)
- n.Xoffset = int64(types.Block)
+ case OFALL:
+ n := nodl(p.pos(), OFALL, nil, nil)
return n
case OBREAK, OCONTINUE:
diff --git a/src/cmd/compile/internal/gc/bitset.go b/src/cmd/compile/internal/gc/bitset.go
index 90babd5a9f6..ed5eea0a11b 100644
--- a/src/cmd/compile/internal/gc/bitset.go
+++ b/src/cmd/compile/internal/gc/bitset.go
@@ -14,6 +14,16 @@ func (f *bitset8) set(mask uint8, b bool) {
}
}
+type bitset16 uint16
+
+func (f *bitset16) set(mask uint16, b bool) {
+ if b {
+ *(*uint16)(f) |= mask
+ } else {
+ *(*uint16)(f) &^= mask
+ }
+}
+
type bitset32 uint32
func (f *bitset32) set(mask uint32, b bool) {
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index f21a4da4913..0733a460d5b 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -39,116 +39,122 @@ var runtimeDecls = [...]struct {
{"concatstring5", funcTag, 29},
{"concatstrings", funcTag, 31},
{"cmpstring", funcTag, 33},
- {"eqstring", funcTag, 34},
- {"intstring", funcTag, 37},
- {"slicebytetostring", funcTag, 39},
- {"slicebytetostringtmp", funcTag, 40},
- {"slicerunetostring", funcTag, 43},
- {"stringtoslicebyte", funcTag, 44},
- {"stringtoslicerune", funcTag, 47},
- {"decoderune", funcTag, 48},
- {"slicecopy", funcTag, 50},
- {"slicestringcopy", funcTag, 51},
- {"convI2I", funcTag, 52},
- {"convT2E", funcTag, 53},
- {"convT2E16", funcTag, 53},
- {"convT2E32", funcTag, 53},
- {"convT2E64", funcTag, 53},
- {"convT2Estring", funcTag, 53},
- {"convT2Eslice", funcTag, 53},
- {"convT2Enoptr", funcTag, 53},
- {"convT2I", funcTag, 53},
- {"convT2I16", funcTag, 53},
- {"convT2I32", funcTag, 53},
- {"convT2I64", funcTag, 53},
- {"convT2Istring", funcTag, 53},
- {"convT2Islice", funcTag, 53},
- {"convT2Inoptr", funcTag, 53},
- {"assertE2I", funcTag, 52},
- {"assertE2I2", funcTag, 54},
- {"assertI2I", funcTag, 52},
- {"assertI2I2", funcTag, 54},
- {"panicdottypeE", funcTag, 55},
- {"panicdottypeI", funcTag, 55},
- {"panicnildottype", funcTag, 56},
- {"ifaceeq", funcTag, 59},
- {"efaceeq", funcTag, 59},
- {"makemap", funcTag, 61},
- {"mapaccess1", funcTag, 62},
- {"mapaccess1_fast32", funcTag, 63},
- {"mapaccess1_fast64", funcTag, 63},
- {"mapaccess1_faststr", funcTag, 63},
- {"mapaccess1_fat", funcTag, 64},
- {"mapaccess2", funcTag, 65},
- {"mapaccess2_fast32", funcTag, 66},
- {"mapaccess2_fast64", funcTag, 66},
- {"mapaccess2_faststr", funcTag, 66},
- {"mapaccess2_fat", funcTag, 67},
- {"mapassign", funcTag, 62},
- {"mapassign_fast32", funcTag, 63},
- {"mapassign_fast64", funcTag, 63},
- {"mapassign_faststr", funcTag, 63},
- {"mapiterinit", funcTag, 68},
- {"mapdelete", funcTag, 68},
- {"mapdelete_fast32", funcTag, 69},
- {"mapdelete_fast64", funcTag, 69},
- {"mapdelete_faststr", funcTag, 69},
- {"mapiternext", funcTag, 70},
- {"makechan", funcTag, 72},
- {"chanrecv1", funcTag, 74},
- {"chanrecv2", funcTag, 75},
- {"chansend1", funcTag, 77},
+ {"intstring", funcTag, 36},
+ {"slicebytetostring", funcTag, 38},
+ {"slicebytetostringtmp", funcTag, 39},
+ {"slicerunetostring", funcTag, 42},
+ {"stringtoslicebyte", funcTag, 43},
+ {"stringtoslicerune", funcTag, 46},
+ {"decoderune", funcTag, 47},
+ {"slicecopy", funcTag, 49},
+ {"slicestringcopy", funcTag, 50},
+ {"convI2I", funcTag, 51},
+ {"convT2E", funcTag, 52},
+ {"convT2E16", funcTag, 52},
+ {"convT2E32", funcTag, 52},
+ {"convT2E64", funcTag, 52},
+ {"convT2Estring", funcTag, 52},
+ {"convT2Eslice", funcTag, 52},
+ {"convT2Enoptr", funcTag, 52},
+ {"convT2I", funcTag, 52},
+ {"convT2I16", funcTag, 52},
+ {"convT2I32", funcTag, 52},
+ {"convT2I64", funcTag, 52},
+ {"convT2Istring", funcTag, 52},
+ {"convT2Islice", funcTag, 52},
+ {"convT2Inoptr", funcTag, 52},
+ {"assertE2I", funcTag, 51},
+ {"assertE2I2", funcTag, 53},
+ {"assertI2I", funcTag, 51},
+ {"assertI2I2", funcTag, 53},
+ {"panicdottypeE", funcTag, 54},
+ {"panicdottypeI", funcTag, 54},
+ {"panicnildottype", funcTag, 55},
+ {"ifaceeq", funcTag, 58},
+ {"efaceeq", funcTag, 58},
+ {"fastrand", funcTag, 60},
+ {"makemap64", funcTag, 62},
+ {"makemap", funcTag, 63},
+ {"makemap_small", funcTag, 64},
+ {"mapaccess1", funcTag, 65},
+ {"mapaccess1_fast32", funcTag, 66},
+ {"mapaccess1_fast64", funcTag, 66},
+ {"mapaccess1_faststr", funcTag, 66},
+ {"mapaccess1_fat", funcTag, 67},
+ {"mapaccess2", funcTag, 68},
+ {"mapaccess2_fast32", funcTag, 69},
+ {"mapaccess2_fast64", funcTag, 69},
+ {"mapaccess2_faststr", funcTag, 69},
+ {"mapaccess2_fat", funcTag, 70},
+ {"mapassign", funcTag, 65},
+ {"mapassign_fast32", funcTag, 66},
+ {"mapassign_fast32ptr", funcTag, 66},
+ {"mapassign_fast64", funcTag, 66},
+ {"mapassign_fast64ptr", funcTag, 66},
+ {"mapassign_faststr", funcTag, 66},
+ {"mapiterinit", funcTag, 71},
+ {"mapdelete", funcTag, 71},
+ {"mapdelete_fast32", funcTag, 72},
+ {"mapdelete_fast64", funcTag, 72},
+ {"mapdelete_faststr", funcTag, 72},
+ {"mapiternext", funcTag, 73},
+ {"makechan64", funcTag, 75},
+ {"makechan", funcTag, 76},
+ {"chanrecv1", funcTag, 78},
+ {"chanrecv2", funcTag, 79},
+ {"chansend1", funcTag, 81},
{"closechan", funcTag, 23},
- {"writeBarrier", varTag, 79},
- {"writebarrierptr", funcTag, 80},
- {"typedmemmove", funcTag, 81},
- {"typedmemclr", funcTag, 82},
- {"typedslicecopy", funcTag, 83},
- {"selectnbsend", funcTag, 84},
- {"selectnbrecv", funcTag, 85},
- {"selectnbrecv2", funcTag, 87},
- {"newselect", funcTag, 88},
- {"selectsend", funcTag, 89},
- {"selectrecv", funcTag, 90},
- {"selectdefault", funcTag, 56},
- {"selectgo", funcTag, 91},
+ {"writeBarrier", varTag, 83},
+ {"writebarrierptr", funcTag, 84},
+ {"typedmemmove", funcTag, 85},
+ {"typedmemclr", funcTag, 86},
+ {"typedslicecopy", funcTag, 87},
+ {"selectnbsend", funcTag, 88},
+ {"selectnbrecv", funcTag, 89},
+ {"selectnbrecv2", funcTag, 91},
+ {"newselect", funcTag, 92},
+ {"selectsend", funcTag, 93},
+ {"selectrecv", funcTag, 94},
+ {"selectdefault", funcTag, 55},
+ {"selectgo", funcTag, 95},
{"block", funcTag, 5},
- {"makeslice", funcTag, 93},
- {"makeslice64", funcTag, 94},
- {"growslice", funcTag, 95},
- {"memmove", funcTag, 96},
- {"memclrNoHeapPointers", funcTag, 97},
- {"memclrHasPointers", funcTag, 97},
- {"memequal", funcTag, 98},
- {"memequal8", funcTag, 99},
- {"memequal16", funcTag, 99},
- {"memequal32", funcTag, 99},
- {"memequal64", funcTag, 99},
- {"memequal128", funcTag, 99},
- {"int64div", funcTag, 100},
- {"uint64div", funcTag, 101},
- {"int64mod", funcTag, 100},
- {"uint64mod", funcTag, 101},
- {"float64toint64", funcTag, 102},
- {"float64touint64", funcTag, 103},
- {"float64touint32", funcTag, 105},
- {"int64tofloat64", funcTag, 106},
- {"uint64tofloat64", funcTag, 107},
- {"uint32tofloat64", funcTag, 108},
- {"complex128div", funcTag, 109},
- {"racefuncenter", funcTag, 110},
+ {"makeslice", funcTag, 97},
+ {"makeslice64", funcTag, 98},
+ {"growslice", funcTag, 99},
+ {"memmove", funcTag, 100},
+ {"memclrNoHeapPointers", funcTag, 101},
+ {"memclrHasPointers", funcTag, 101},
+ {"memequal", funcTag, 102},
+ {"memequal8", funcTag, 103},
+ {"memequal16", funcTag, 103},
+ {"memequal32", funcTag, 103},
+ {"memequal64", funcTag, 103},
+ {"memequal128", funcTag, 103},
+ {"int64div", funcTag, 104},
+ {"uint64div", funcTag, 105},
+ {"int64mod", funcTag, 104},
+ {"uint64mod", funcTag, 105},
+ {"float64toint64", funcTag, 106},
+ {"float64touint64", funcTag, 107},
+ {"float64touint32", funcTag, 108},
+ {"int64tofloat64", funcTag, 109},
+ {"uint64tofloat64", funcTag, 110},
+ {"uint32tofloat64", funcTag, 111},
+ {"complex128div", funcTag, 112},
+ {"racefuncenter", funcTag, 113},
{"racefuncexit", funcTag, 5},
- {"raceread", funcTag, 110},
- {"racewrite", funcTag, 110},
- {"racereadrange", funcTag, 111},
- {"racewriterange", funcTag, 111},
- {"msanread", funcTag, 111},
- {"msanwrite", funcTag, 111},
+ {"raceread", funcTag, 113},
+ {"racewrite", funcTag, 113},
+ {"racereadrange", funcTag, 114},
+ {"racewriterange", funcTag, 114},
+ {"msanread", funcTag, 114},
+ {"msanwrite", funcTag, 114},
{"support_popcnt", varTag, 11},
+ {"support_sse41", varTag, 11},
}
func runtimeTypes() []*types.Type {
- var typs [112]*types.Type
+ var typs [115]*types.Type
typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[TANY]
@@ -183,83 +189,86 @@ func runtimeTypes() []*types.Type {
typs[31] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[30])}, []*Node{anonfield(typs[21])})
typs[32] = types.Types[TINT]
typs[33] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[32])})
- typs[34] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[11])})
- typs[35] = types.NewArray(typs[0], 4)
- typs[36] = types.NewPtr(typs[35])
- typs[37] = functype(nil, []*Node{anonfield(typs[36]), anonfield(typs[15])}, []*Node{anonfield(typs[21])})
- typs[38] = types.NewSlice(typs[0])
- typs[39] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[38])}, []*Node{anonfield(typs[21])})
- typs[40] = functype(nil, []*Node{anonfield(typs[38])}, []*Node{anonfield(typs[21])})
- typs[41] = types.Runetype
- typs[42] = types.NewSlice(typs[41])
- typs[43] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[42])}, []*Node{anonfield(typs[21])})
- typs[44] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[21])}, []*Node{anonfield(typs[38])})
- typs[45] = types.NewArray(typs[41], 32)
- typs[46] = types.NewPtr(typs[45])
- typs[47] = functype(nil, []*Node{anonfield(typs[46]), anonfield(typs[21])}, []*Node{anonfield(typs[42])})
- typs[48] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[32])}, []*Node{anonfield(typs[41]), anonfield(typs[32])})
- typs[49] = types.Types[TUINTPTR]
- typs[50] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2]), anonfield(typs[49])}, []*Node{anonfield(typs[32])})
- typs[51] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])})
- typs[52] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
- typs[53] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
- typs[54] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[11])})
- typs[55] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
- typs[56] = functype(nil, []*Node{anonfield(typs[1])}, nil)
- typs[57] = types.NewPtr(typs[49])
- typs[58] = types.Types[TUNSAFEPTR]
- typs[59] = functype(nil, []*Node{anonfield(typs[57]), anonfield(typs[58]), anonfield(typs[58])}, []*Node{anonfield(typs[11])})
- typs[60] = types.NewMap(typs[2], typs[2])
- typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[60])})
- typs[62] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
- typs[63] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
- typs[64] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
- typs[65] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[11])})
- typs[66] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[11])})
- typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[11])})
- typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3])}, nil)
- typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[2])}, nil)
- typs[70] = functype(nil, []*Node{anonfield(typs[3])}, nil)
- typs[71] = types.NewChan(typs[2], types.Cboth)
- typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[71])})
- typs[73] = types.NewChan(typs[2], types.Crecv)
- typs[74] = functype(nil, []*Node{anonfield(typs[73]), anonfield(typs[3])}, nil)
- typs[75] = functype(nil, []*Node{anonfield(typs[73]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
- typs[76] = types.NewChan(typs[2], types.Csend)
- typs[77] = functype(nil, []*Node{anonfield(typs[76]), anonfield(typs[3])}, nil)
- typs[78] = types.NewArray(typs[0], 3)
- typs[79] = tostruct([]*Node{namedfield("enabled", typs[11]), namedfield("pad", typs[78]), namedfield("needed", typs[11]), namedfield("cgo", typs[11]), namedfield("alignme", typs[17])})
- typs[80] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[2])}, nil)
- typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
- typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
- typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])})
- typs[84] = functype(nil, []*Node{anonfield(typs[76]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
- typs[85] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[73])}, []*Node{anonfield(typs[11])})
- typs[86] = types.NewPtr(typs[11])
- typs[87] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[86]), anonfield(typs[73])}, []*Node{anonfield(typs[11])})
- typs[88] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[8])}, nil)
- typs[89] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[76]), anonfield(typs[3])}, nil)
- typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[73]), anonfield(typs[3]), anonfield(typs[86])}, nil)
- typs[91] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[32])})
- typs[92] = types.NewSlice(typs[2])
- typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[32])}, []*Node{anonfield(typs[92])})
- typs[94] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[92])})
- typs[95] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[92]), anonfield(typs[32])}, []*Node{anonfield(typs[92])})
- typs[96] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[49])}, nil)
- typs[97] = functype(nil, []*Node{anonfield(typs[58]), anonfield(typs[49])}, nil)
- typs[98] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[49])}, []*Node{anonfield(typs[11])})
- typs[99] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
- typs[100] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
- typs[101] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])})
- typs[102] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[15])})
- typs[103] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[17])})
- typs[104] = types.Types[TUINT32]
- typs[105] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[104])})
- typs[106] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[13])})
- typs[107] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[13])})
- typs[108] = functype(nil, []*Node{anonfield(typs[104])}, []*Node{anonfield(typs[13])})
- typs[109] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])})
- typs[110] = functype(nil, []*Node{anonfield(typs[49])}, nil)
- typs[111] = functype(nil, []*Node{anonfield(typs[49]), anonfield(typs[49])}, nil)
+ typs[34] = types.NewArray(typs[0], 4)
+ typs[35] = types.NewPtr(typs[34])
+ typs[36] = functype(nil, []*Node{anonfield(typs[35]), anonfield(typs[15])}, []*Node{anonfield(typs[21])})
+ typs[37] = types.NewSlice(typs[0])
+ typs[38] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[37])}, []*Node{anonfield(typs[21])})
+ typs[39] = functype(nil, []*Node{anonfield(typs[37])}, []*Node{anonfield(typs[21])})
+ typs[40] = types.Runetype
+ typs[41] = types.NewSlice(typs[40])
+ typs[42] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[41])}, []*Node{anonfield(typs[21])})
+ typs[43] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[21])}, []*Node{anonfield(typs[37])})
+ typs[44] = types.NewArray(typs[40], 32)
+ typs[45] = types.NewPtr(typs[44])
+ typs[46] = functype(nil, []*Node{anonfield(typs[45]), anonfield(typs[21])}, []*Node{anonfield(typs[41])})
+ typs[47] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[32])}, []*Node{anonfield(typs[40]), anonfield(typs[32])})
+ typs[48] = types.Types[TUINTPTR]
+ typs[49] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2]), anonfield(typs[48])}, []*Node{anonfield(typs[32])})
+ typs[50] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])})
+ typs[51] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
+ typs[52] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
+ typs[53] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[11])})
+ typs[54] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
+ typs[55] = functype(nil, []*Node{anonfield(typs[1])}, nil)
+ typs[56] = types.NewPtr(typs[48])
+ typs[57] = types.Types[TUNSAFEPTR]
+ typs[58] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[57]), anonfield(typs[57])}, []*Node{anonfield(typs[11])})
+ typs[59] = types.Types[TUINT32]
+ typs[60] = functype(nil, nil, []*Node{anonfield(typs[59])})
+ typs[61] = types.NewMap(typs[2], typs[2])
+ typs[62] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[61])})
+ typs[63] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[3])}, []*Node{anonfield(typs[61])})
+ typs[64] = functype(nil, nil, []*Node{anonfield(typs[61])})
+ typs[65] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
+ typs[66] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
+ typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
+ typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[11])})
+ typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[11])})
+ typs[70] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[11])})
+ typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3])}, nil)
+ typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[2])}, nil)
+ typs[73] = functype(nil, []*Node{anonfield(typs[3])}, nil)
+ typs[74] = types.NewChan(typs[2], types.Cboth)
+ typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[74])})
+ typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32])}, []*Node{anonfield(typs[74])})
+ typs[77] = types.NewChan(typs[2], types.Crecv)
+ typs[78] = functype(nil, []*Node{anonfield(typs[77]), anonfield(typs[3])}, nil)
+ typs[79] = functype(nil, []*Node{anonfield(typs[77]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
+ typs[80] = types.NewChan(typs[2], types.Csend)
+ typs[81] = functype(nil, []*Node{anonfield(typs[80]), anonfield(typs[3])}, nil)
+ typs[82] = types.NewArray(typs[0], 3)
+ typs[83] = tostruct([]*Node{namedfield("enabled", typs[11]), namedfield("pad", typs[82]), namedfield("needed", typs[11]), namedfield("cgo", typs[11]), namedfield("alignme", typs[17])})
+ typs[84] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[2])}, nil)
+ typs[85] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
+ typs[86] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
+ typs[87] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])})
+ typs[88] = functype(nil, []*Node{anonfield(typs[80]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
+ typs[89] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[77])}, []*Node{anonfield(typs[11])})
+ typs[90] = types.NewPtr(typs[11])
+ typs[91] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[90]), anonfield(typs[77])}, []*Node{anonfield(typs[11])})
+ typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[8])}, nil)
+ typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[80]), anonfield(typs[3])}, nil)
+ typs[94] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[77]), anonfield(typs[3]), anonfield(typs[90])}, nil)
+ typs[95] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[32])})
+ typs[96] = types.NewSlice(typs[2])
+ typs[97] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[32])}, []*Node{anonfield(typs[96])})
+ typs[98] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[96])})
+ typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[96]), anonfield(typs[32])}, []*Node{anonfield(typs[96])})
+ typs[100] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[48])}, nil)
+ typs[101] = functype(nil, []*Node{anonfield(typs[57]), anonfield(typs[48])}, nil)
+ typs[102] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[48])}, []*Node{anonfield(typs[11])})
+ typs[103] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
+ typs[104] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
+ typs[105] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])})
+ typs[106] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[15])})
+ typs[107] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[17])})
+ typs[108] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[59])})
+ typs[109] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[13])})
+ typs[110] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[13])})
+ typs[111] = functype(nil, []*Node{anonfield(typs[59])}, []*Node{anonfield(typs[13])})
+ typs[112] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])})
+ typs[113] = functype(nil, []*Node{anonfield(typs[48])}, nil)
+ typs[114] = functype(nil, []*Node{anonfield(typs[48]), anonfield(typs[48])}, nil)
return typs[:]
}
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index 7f4846db9db..de17d51d8ab 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -48,7 +48,6 @@ func concatstring5(*[32]byte, string, string, string, string, string) string
func concatstrings(*[32]byte, []string) string
func cmpstring(string, string) int
-func eqstring(string, string) bool
func intstring(*[4]byte, int64) string
func slicebytetostring(*[32]byte, []byte) string
func slicebytetostringtmp([]byte) string
@@ -78,7 +77,7 @@ func convT2Istring(tab *byte, elem *any) (ret any)
func convT2Islice(tab *byte, elem *any) (ret any)
func convT2Inoptr(tab *byte, elem *any) (ret any)
-// interface type assertions x.(T)
+// interface type assertions x.(T)
func assertE2I(typ *byte, iface any) (ret any)
func assertE2I2(typ *byte, iface any) (ret any, b bool)
func assertI2I(typ *byte, iface any) (ret any)
@@ -92,8 +91,12 @@ func panicnildottype(want *byte)
func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
+func fastrand() uint32
+
// *byte is really *runtime.Type
-func makemap(mapType *byte, hint int64, mapbuf *any, bucketbuf *any) (hmap map[any]any)
+func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any)
+func makemap(mapType *byte, hint int, mapbuf *any) (hmap map[any]any)
+func makemap_small() (hmap map[any]any)
func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any)
func mapaccess1_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
func mapaccess1_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
@@ -106,7 +109,9 @@ func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pre
func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any)
func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any)
func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
func mapdelete(mapType *byte, hmap map[any]any, key *any)
@@ -116,7 +121,8 @@ func mapdelete_faststr(mapType *byte, hmap map[any]any, key any)
func mapiternext(hiter *any)
// *byte is really *runtime.Type
-func makechan(chanType *byte, hint int64) (hchan chan any)
+func makechan64(chanType *byte, size int64) (hchan chan any)
+func makechan(chanType *byte, size int) (hchan chan any)
func chanrecv1(hchan <-chan any, elem *any)
func chanrecv2(hchan <-chan any, elem *any) bool
func chansend1(hchan chan<- any, elem *any)
@@ -190,3 +196,4 @@ func msanwrite(addr, size uintptr)
// architecture variants
var support_popcnt bool
+var support_sse41 bool
diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go
index 72f29e82538..03c4b9d8297 100644
--- a/src/cmd/compile/internal/gc/bv.go
+++ b/src/cmd/compile/internal/gc/bv.go
@@ -5,9 +5,9 @@
package gc
const (
- WORDBITS = 32
- WORDMASK = WORDBITS - 1
- WORDSHIFT = 5
+ wordBits = 32
+ wordMask = wordBits - 1
+ wordShift = 5
)
// A bvec is a bit vector.
@@ -17,7 +17,7 @@ type bvec struct {
}
func bvalloc(n int32) bvec {
- nword := (n + WORDBITS - 1) / WORDBITS
+ nword := (n + wordBits - 1) / wordBits
return bvec{n, make([]uint32, nword)}
}
@@ -28,7 +28,7 @@ type bulkBvec struct {
}
func bvbulkalloc(nbit int32, count int32) bulkBvec {
- nword := (nbit + WORDBITS - 1) / WORDBITS
+ nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
@@ -66,24 +66,24 @@ func (bv bvec) Get(i int32) bool {
if i < 0 || i >= bv.n {
Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
}
- mask := uint32(1 << uint(i%WORDBITS))
- return bv.b[i>>WORDSHIFT]&mask != 0
+ mask := uint32(1 << uint(i%wordBits))
+ return bv.b[i>>wordShift]&mask != 0
}
func (bv bvec) Set(i int32) {
if i < 0 || i >= bv.n {
Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
- mask := uint32(1 << uint(i%WORDBITS))
- bv.b[i/WORDBITS] |= mask
+ mask := uint32(1 << uint(i%wordBits))
+ bv.b[i/wordBits] |= mask
}
func (bv bvec) Unset(i int32) {
if i < 0 || i >= bv.n {
Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
}
- mask := uint32(1 << uint(i%WORDBITS))
- bv.b[i/WORDBITS] &^= mask
+ mask := uint32(1 << uint(i%wordBits))
+ bv.b[i/wordBits] &^= mask
}
// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
@@ -94,11 +94,11 @@ func (bv bvec) Next(i int32) int32 {
}
// Jump i ahead to next word with bits.
- if bv.b[i>>WORDSHIFT]>>uint(i&WORDMASK) == 0 {
- i &^= WORDMASK
- i += WORDBITS
- for i < bv.n && bv.b[i>>WORDSHIFT] == 0 {
- i += WORDBITS
+ if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
+ i &^= wordMask
+ i += wordBits
+ for i < bv.n && bv.b[i>>wordShift] == 0 {
+ i += wordBits
}
}
@@ -107,7 +107,7 @@ func (bv bvec) Next(i int32) int32 {
}
// Find 1 bit.
- w := bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
+ w := bv.b[i>>wordShift] >> uint(i&wordMask)
for w&1 == 0 {
w >>= 1
@@ -118,8 +118,8 @@ func (bv bvec) Next(i int32) int32 {
}
func (bv bvec) IsEmpty() bool {
- for i := int32(0); i < bv.n; i += WORDBITS {
- if bv.b[i>>WORDSHIFT] != 0 {
+ for i := int32(0); i < bv.n; i += wordBits {
+ if bv.b[i>>wordShift] != 0 {
return false
}
}
@@ -129,7 +129,7 @@ func (bv bvec) IsEmpty() bool {
func (bv bvec) Not() {
i := int32(0)
w := int32(0)
- for ; i < bv.n; i, w = i+WORDBITS, w+1 {
+ for ; i < bv.n; i, w = i+wordBits, w+1 {
bv.b[w] = ^bv.b[w]
}
}
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index 143e1969c7d..d3af16e1764 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -19,7 +19,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
n.Func.Depth = funcdepth
n.Func.Outerfunc = Curfn
- old := p.funchdr(n, expr.Pos())
+ old := p.funchdr(n)
// steal ntype's argument names and
// leave a fresh copy in their place.
@@ -60,7 +60,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
n.Nbody.Set(body)
n.Func.Endlineno = lineno
- p.funcbody(n, expr.Body.Rbrace, old)
+ p.funcbody(old)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
@@ -463,9 +463,8 @@ func walkclosure(func_ *Node, init *Nodes) *Node {
Warnl(func_.Pos, "closure converted to global")
}
return func_.Func.Closure.Func.Nname
- } else {
- closuredebugruntimecheck(func_)
}
+ closuredebugruntimecheck(func_)
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
@@ -481,28 +480,29 @@ func walkclosure(func_ *Node, init *Nodes) *Node {
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
- typ := nod(OTSTRUCT, nil, nil)
-
- typ.List.Set1(namedfield(".F", types.Types[TUINTPTR]))
+ fields := []*Node{
+ namedfield(".F", types.Types[TUINTPTR]),
+ }
for _, v := range func_.Func.Cvars.Slice() {
if v.Op == OXXX {
continue
}
- typ1 := typenod(v.Type)
+ typ := v.Type
if !v.Name.Byval() {
- typ1 = nod(OIND, typ1, nil)
+ typ = types.NewPtr(typ)
}
- typ.List.Append(nod(ODCLFIELD, newname(v.Sym), typ1))
+ fields = append(fields, symfield(v.Sym, typ))
}
+ typ := tostruct(fields)
+ typ.SetNoalg(true)
- clos := nod(OCOMPLIT, nil, nod(OIND, typ, nil))
+ clos := nod(OCOMPLIT, nil, nod(OIND, typenod(typ), nil))
clos.Esc = func_.Esc
clos.Right.SetImplicit(true)
clos.List.Set(append([]*Node{nod(OCFUNC, func_.Func.Closure.Func.Nname, nil)}, func_.Func.Enter.Slice()...))
// Force type conversion from *struct to the func type.
clos = nod(OCONVNOP, clos, nil)
-
clos.Type = func_.Type
clos = typecheck(clos, Erv)
@@ -646,7 +646,7 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
call.List.Set(callargs)
call.SetIsddd(ddd)
- if t0.Results().NumFields() == 0 {
+ if t0.NumResults() == 0 {
body = append(body, call)
} else {
n := nod(OAS2, nil, nil)
@@ -683,11 +683,13 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
checknil(n.Left, init)
}
- typ := nod(OTSTRUCT, nil, nil)
- typ.List.Set1(namedfield("F", types.Types[TUINTPTR]))
- typ.List.Append(namedfield("R", n.Left.Type))
+ typ := tostruct([]*Node{
+ namedfield("F", types.Types[TUINTPTR]),
+ namedfield("R", n.Left.Type),
+ })
+ typ.SetNoalg(true)
- clos := nod(OCOMPLIT, nil, nod(OIND, typ, nil))
+ clos := nod(OCOMPLIT, nil, nod(OIND, typenod(typ), nil))
clos.Esc = n.Esc
clos.Right.SetImplicit(true)
clos.List.Set1(nod(OCFUNC, n.Func.Nname, nil))
@@ -695,7 +697,6 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
// Force type conversion from *struct to the func type.
clos = nod(OCONVNOP, clos, nil)
-
clos.Type = n.Type
clos = typecheck(clos, Erv)
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index a465d4a7bb5..dcc16b6decd 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -12,7 +12,7 @@ import (
)
// Ctype describes the constant kind of an "ideal" (untyped) constant.
-type Ctype int8
+type Ctype uint8
const (
CTxxx Ctype = iota
@@ -247,7 +247,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node {
return n
- // target is invalid type for a constant? leave alone.
+ // target is invalid type for a constant? leave alone.
case OLITERAL:
if !okforconst[t.Etype] && n.Type.Etype != TNIL {
return defaultlitreuse(n, nil, reuse)
@@ -297,7 +297,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node {
ct := consttype(n)
var et types.EType
- if ct < 0 {
+ if ct == 0 {
goto bad
}
@@ -408,7 +408,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node {
bad:
if !n.Diag() {
if !t.Broke() {
- yyerror("cannot convert %v to type %v", n, t)
+ yyerror("cannot convert %L to type %v", n, t)
}
n.SetDiag(true)
}
@@ -591,7 +591,7 @@ func tostr(v Val) Val {
func consttype(n *Node) Ctype {
if n == nil || n.Op != OLITERAL {
- return -1
+ return 0
}
return n.Val().Ctype()
}
@@ -693,7 +693,7 @@ func evconst(n *Node) {
if nl == nil || nl.Type == nil {
return
}
- if consttype(nl) < 0 {
+ if consttype(nl) == 0 {
return
}
wl := nl.Type.Etype
@@ -840,7 +840,7 @@ func evconst(n *Node) {
if nr.Type == nil {
return
}
- if consttype(nr) < 0 {
+ if consttype(nr) == 0 {
return
}
wr = nr.Type.Etype
@@ -1195,8 +1195,6 @@ func evconst(n *Node) {
goto setfalse
}
- goto ret
-
ret:
norig = saveorig(n)
*n = *nl
@@ -1375,7 +1373,8 @@ func defaultlitreuse(n *Node, t *types.Type, reuse canReuseNode) *Node {
return convlit(n, t)
}
- if n.Val().Ctype() == CTNIL {
+ switch n.Val().Ctype() {
+ case CTNIL:
lineno = lno
if !n.Diag() {
yyerror("use of untyped nil")
@@ -1383,17 +1382,13 @@ func defaultlitreuse(n *Node, t *types.Type, reuse canReuseNode) *Node {
}
n.Type = nil
- break
- }
-
- if n.Val().Ctype() == CTSTR {
+ case CTSTR:
t1 := types.Types[TSTRING]
n = convlit1(n, t1, false, reuse)
- break
+ default:
+ yyerror("defaultlit: unknown literal: %v", n)
}
- yyerror("defaultlit: unknown literal: %v", n)
-
case CTxxx:
Fatalf("defaultlit: idealkind is CTxxx: %+v", n)
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index b8a5a90a036..2756707aef1 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -5,7 +5,9 @@
package gc
import (
+ "bytes"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
"fmt"
"strings"
@@ -83,12 +85,14 @@ func declare(n *Node, ctxt Class) {
yyerror("cannot declare name %v", s)
}
- if ctxt == PEXTERN && s.Name == "init" {
- yyerror("cannot declare init - must be func")
- }
-
gen := 0
if ctxt == PEXTERN {
+ if s.Name == "init" {
+ yyerror("cannot declare init - must be func")
+ }
+ if s.Name == "main" && localpkg.Name == "main" {
+ yyerror("cannot declare main - must be func")
+ }
externdcl = append(externdcl, n)
} else {
if Curfn == nil && ctxt == PAUTO {
@@ -212,7 +216,13 @@ func newnoname(s *types.Sym) *Node {
// newfuncname generates a new name node for a function or method.
// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
func newfuncname(s *types.Sym) *Node {
- n := newname(s)
+ return newfuncnamel(lineno, s)
+}
+
+// newfuncnamel generates a new name node for a function or method.
+// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
+func newfuncnamel(pos src.XPos, s *types.Sym) *Node {
+ n := newnamel(pos, s)
n.Func = new(Func)
n.Func.SetIsHiddenClosure(Curfn != nil)
return n
@@ -227,11 +237,15 @@ func dclname(s *types.Sym) *Node {
}
func typenod(t *types.Type) *Node {
+ return typenodl(src.NoXPos, t)
+}
+
+func typenodl(pos src.XPos, t *types.Type) *Node {
// if we copied another type with *t = *u
// then t->nod might be out of date, so
// check t->nod->type too
if asNode(t.Nod) == nil || asNode(t.Nod).Type != t {
- t.Nod = asTypesNode(nod(OTYPE, nil, nil))
+ t.Nod = asTypesNode(nodl(pos, OTYPE, nil, nil))
asNode(t.Nod).Type = t
asNode(t.Nod).Sym = t.Sym
}
@@ -244,7 +258,11 @@ func anonfield(typ *types.Type) *Node {
}
func namedfield(s string, typ *types.Type) *Node {
- return nod(ODCLFIELD, newname(lookup(s)), typenod(typ))
+ return symfield(lookup(s), typ)
+}
+
+func symfield(s *types.Sym, typ *types.Type) *Node {
+ return nod(ODCLFIELD, newname(s), typenod(typ))
}
// oldname returns the Node that declares symbol s in the current scope.
@@ -519,7 +537,7 @@ func funcstart(n *Node) {
// finish the body.
// called in auto-declaration context.
// returns in extern-declaration context.
-func funcbody(n *Node) {
+func funcbody() {
// change the declaration context from auto to extern
if dclcontext != PAUTO {
Fatalf("funcbody: unexpected dclcontext %d", dclcontext)
@@ -742,7 +760,7 @@ func tointerface(l []*Node) *types.Type {
return t
}
-func tointerface0(t *types.Type, l []*Node) *types.Type {
+func tointerface0(t *types.Type, l []*Node) {
if t == nil || !t.IsInterface() {
Fatalf("interface expected")
}
@@ -756,35 +774,6 @@ func tointerface0(t *types.Type, l []*Node) *types.Type {
fields = append(fields, f)
}
t.SetInterface(fields)
-
- return t
-}
-
-func embedded(s *types.Sym, pkg *types.Pkg) *Node {
- const (
- CenterDot = 0xB7
- )
- // Names sometimes have disambiguation junk
- // appended after a center dot. Discard it when
- // making the name for the embedded struct field.
- name := s.Name
-
- if i := strings.Index(s.Name, string(CenterDot)); i >= 0 {
- name = s.Name[:i]
- }
-
- var n *Node
- if exportname(name) {
- n = newname(lookup(name))
- } else if s.Pkg == builtinpkg {
- // The name of embedded builtins belongs to pkg.
- n = newname(pkg.Lookup(name))
- } else {
- n = newname(s.Pkg.Lookup(name))
- }
- n = nod(ODCLFIELD, n, oldname(s))
- n.SetEmbedded(true)
- return n
}
func fakeRecv() *Node {
@@ -949,7 +938,8 @@ func methodname(s *types.Sym, recv *types.Type) *types.Sym {
// Add a method, declared as a function.
// - msym is the method symbol
// - t is function type (with receiver)
-func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) {
+// Returns a pointer to the existing or added Field.
+func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
if msym == nil {
Fatalf("no method symbol")
}
@@ -958,7 +948,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) {
rf := t.Recv() // ptr to this structure
if rf == nil {
yyerror("missing receiver")
- return
+ return nil
}
mt := methtype(rf.Type)
@@ -968,7 +958,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) {
if t != nil && t.IsPtr() {
if t.Sym != nil {
yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
- return
+ return nil
}
t = t.Elem()
}
@@ -987,23 +977,23 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) {
// but just in case, fall back to generic error.
yyerror("invalid receiver type %v (%L / %L)", pa, pa, t)
}
- return
+ return nil
}
- if local && !mt.Local() {
+ if local && mt.Sym.Pkg != localpkg {
yyerror("cannot define new methods on non-local type %v", mt)
- return
+ return nil
}
if msym.IsBlank() {
- return
+ return nil
}
if mt.IsStruct() {
for _, f := range mt.Fields().Slice() {
if f.Sym == msym {
yyerror("type %v has both field and method named %v", mt, msym)
- return
+ return nil
}
}
}
@@ -1017,7 +1007,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) {
if !eqtype(t, f.Type) || !eqtype(t.Recv().Type, f.Type.Recv().Type) {
yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
}
- return
+ return f
}
f := types.NewField()
@@ -1027,6 +1017,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) {
f.SetNointerface(nointerface)
mt.Methods().Append(f)
+ return f
}
func funccompile(n *Node) {
@@ -1096,9 +1087,10 @@ func makefuncsym(s *types.Sym) {
if s.IsBlank() {
return
}
- if compiling_runtime && s.Name == "getg" {
- // runtime.getg() is not a real function and so does
- // not get a funcsym.
+ if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
+ // runtime.getg(), getclosureptr(), getcallerpc(), and
+ // getcallersp() are not real functions and so do not
+ // get funcsyms.
return
}
if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed {
@@ -1122,123 +1114,175 @@ func dclfunc(sym *types.Sym, tfn *Node) *Node {
}
type nowritebarrierrecChecker struct {
- curfn *Node
- stable bool
+ // extraCalls contains extra function calls that may not be
+ // visible during later analysis. It maps from the ODCLFUNC of
+ // the caller to a list of callees.
+ extraCalls map[*Node][]nowritebarrierrecCall
- // best maps from the ODCLFUNC of each visited function that
- // recursively invokes a write barrier to the called function
- // on the shortest path to a write barrier.
- best map[*Node]nowritebarrierrecCall
+ // curfn is the current function during AST walks.
+ curfn *Node
}
type nowritebarrierrecCall struct {
- target *Node
- depth int
- lineno src.XPos
+ target *Node // ODCLFUNC of caller or callee
+ lineno src.XPos // line of call
}
-func checknowritebarrierrec() {
- c := nowritebarrierrecChecker{
- best: make(map[*Node]nowritebarrierrecCall),
+type nowritebarrierrecCallSym struct {
+ target *obj.LSym // LSym of callee
+ lineno src.XPos // line of call
+}
+
+// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
+// must be called before transformclosure and walk.
+func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
+ c := &nowritebarrierrecChecker{
+ extraCalls: make(map[*Node][]nowritebarrierrecCall),
}
- visitBottomUp(xtop, func(list []*Node, recursive bool) {
- // Functions with write barriers have depth 0.
- for _, n := range list {
- if n.Func.WBPos.IsKnown() && n.Func.Pragma&Nowritebarrier != 0 {
- yyerrorl(n.Func.WBPos, "write barrier prohibited")
- }
- if n.Func.WBPos.IsKnown() && n.Func.Pragma&Yeswritebarrierrec == 0 {
- c.best[n] = nowritebarrierrecCall{target: nil, depth: 0, lineno: n.Func.WBPos}
- }
+
+ // Find all systemstack calls and record their targets. In
+ // general, flow analysis can't see into systemstack, but it's
+ // important to handle it for this check, so we model it
+ // directly. This has to happen before transformclosure since
+ // it's a lot harder to work out the argument after.
+ for _, n := range xtop {
+ if n.Op != ODCLFUNC {
+ continue
}
-
- // Propagate write barrier depth up from callees. In
- // the recursive case, we have to update this at most
- // len(list) times and can stop when we an iteration
- // that doesn't change anything.
- for _ = range list {
- c.stable = false
- for _, n := range list {
- if n.Func.Pragma&Yeswritebarrierrec != 0 {
- // Don't propagate write
- // barrier up to a
- // yeswritebarrierrec function.
- continue
- }
- if !n.Func.WBPos.IsKnown() {
- c.curfn = n
- c.visitcodelist(n.Nbody)
- }
- }
- if c.stable {
- break
- }
- }
-
- // Check nowritebarrierrec functions.
- for _, n := range list {
- if n.Func.Pragma&Nowritebarrierrec == 0 {
- continue
- }
- call, hasWB := c.best[n]
- if !hasWB {
- continue
- }
-
- // Build the error message in reverse.
- err := ""
- for call.target != nil {
- err = fmt.Sprintf("\n\t%v: called by %v%s", linestr(call.lineno), n.Func.Nname, err)
- n = call.target
- call = c.best[n]
- }
- err = fmt.Sprintf("write barrier prohibited by caller; %v%s", n.Func.Nname, err)
- yyerrorl(n.Func.WBPos, err)
- }
- })
+ c.curfn = n
+ inspect(n, c.findExtraCalls)
+ }
+ c.curfn = nil
+ return c
}
-func (c *nowritebarrierrecChecker) visitcodelist(l Nodes) {
- for _, n := range l.Slice() {
- c.visitcode(n)
+func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool {
+ if n.Op != OCALLFUNC {
+ return true
}
-}
-
-func (c *nowritebarrierrecChecker) visitcode(n *Node) {
- if n == nil {
- return
- }
-
- if n.Op == OCALLFUNC || n.Op == OCALLMETH {
- c.visitcall(n)
- }
-
- c.visitcodelist(n.Ninit)
- c.visitcode(n.Left)
- c.visitcode(n.Right)
- c.visitcodelist(n.List)
- c.visitcodelist(n.Nbody)
- c.visitcodelist(n.Rlist)
-}
-
-func (c *nowritebarrierrecChecker) visitcall(n *Node) {
fn := n.Left
- if n.Op == OCALLMETH {
- fn = asNode(n.Left.Sym.Def)
- }
if fn == nil || fn.Op != ONAME || fn.Class() != PFUNC || fn.Name.Defn == nil {
- return
+ return true
+ }
+ if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" {
+ return true
}
- defn := fn.Name.Defn
- fnbest, ok := c.best[defn]
- if !ok {
- return
+ var callee *Node
+ arg := n.List.First()
+ switch arg.Op {
+ case ONAME:
+ callee = arg.Name.Defn
+ case OCLOSURE:
+ callee = arg.Func.Closure
+ default:
+ Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
}
- best, ok := c.best[c.curfn]
- if ok && fnbest.depth+1 >= best.depth {
- return
+ if callee.Op != ODCLFUNC {
+ Fatalf("expected ODCLFUNC node, got %+v", callee)
+ }
+ c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos})
+ return true
+}
+
+// recordCall records a call from ODCLFUNC node "from", to function
+// symbol "to" at position pos.
+//
+// This should be done as late as possible during compilation to
+// capture precise call graphs. The target of the call is an LSym
+// because that's all we know after we start SSA.
+//
+// This can be called concurrently for different from Nodes.
+func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) {
+ if from.Op != ODCLFUNC {
+ Fatalf("expected ODCLFUNC, got %v", from)
+ }
+ // We record this information on the *Func so this is
+ // concurrent-safe.
+ fn := from.Func
+ if fn.nwbrCalls == nil {
+ fn.nwbrCalls = new([]nowritebarrierrecCallSym)
+ }
+ *fn.nwbrCalls = append(*fn.nwbrCalls, nowritebarrierrecCallSym{to, pos})
+}
+
+func (c *nowritebarrierrecChecker) check() {
+ // We walk the call graph as late as possible so we can
+ // capture all calls created by lowering, but this means we
+ // only get to see the obj.LSyms of calls. symToFunc lets us
+ // get back to the ODCLFUNCs.
+ symToFunc := make(map[*obj.LSym]*Node)
+ // funcs records the back-edges of the BFS call graph walk. It
+ // maps from the ODCLFUNC of each function that must not have
+ // write barriers to the call that inhibits them. Functions
+ // that are directly marked go:nowritebarrierrec are in this
+ // map with a zero-valued nowritebarrierrecCall. This also
+ // acts as the set of marks for the BFS of the call graph.
+ funcs := make(map[*Node]nowritebarrierrecCall)
+ // q is the queue of ODCLFUNC Nodes to visit in BFS order.
+ var q nodeQueue
+
+ for _, n := range xtop {
+ if n.Op != ODCLFUNC {
+ continue
+ }
+
+ symToFunc[n.Func.lsym] = n
+
+ // Make nowritebarrierrec functions BFS roots.
+ if n.Func.Pragma&Nowritebarrierrec != 0 {
+ funcs[n] = nowritebarrierrecCall{}
+ q.pushRight(n)
+ }
+ // Check go:nowritebarrier functions.
+ if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() {
+ yyerrorl(n.Func.WBPos, "write barrier prohibited")
+ }
+ }
+
+ // Perform a BFS of the call graph from all
+ // go:nowritebarrierrec functions.
+ enqueue := func(src, target *Node, pos src.XPos) {
+ if target.Func.Pragma&Yeswritebarrierrec != 0 {
+ // Don't flow into this function.
+ return
+ }
+ if _, ok := funcs[target]; ok {
+ // Already found a path to target.
+ return
+ }
+
+ // Record the path.
+ funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
+ q.pushRight(target)
+ }
+ for !q.empty() {
+ fn := q.popLeft()
+
+ // Check fn.
+ if fn.Func.WBPos.IsKnown() {
+ var err bytes.Buffer
+ call := funcs[fn]
+ for call.target != nil {
+ fmt.Fprintf(&err, "\n\t%v: called by %v", linestr(call.lineno), call.target.Func.Nname)
+ call = funcs[call.target]
+ }
+ yyerrorl(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String())
+ continue
+ }
+
+ // Enqueue fn's calls.
+ for _, callee := range c.extraCalls[fn] {
+ enqueue(fn, callee.target, callee.lineno)
+ }
+ if fn.Func.nwbrCalls == nil {
+ continue
+ }
+ for _, callee := range *fn.Func.nwbrCalls {
+ target := symToFunc[callee.target]
+ if target != nil {
+ enqueue(fn, target, callee.lineno)
+ }
+ }
}
- c.best[c.curfn] = nowritebarrierrecCall{target: defn, depth: fnbest.depth + 1, lineno: n.Pos}
- c.stable = false
}
diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go
new file mode 100644
index 00000000000..f76bacc5b91
--- /dev/null
+++ b/src/cmd/compile/internal/gc/dwinl.go
@@ -0,0 +1,317 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "sort"
+ "strings"
+)
+
+// To identify variables by original source position.
+type varPos struct {
+ DeclFile string
+ DeclLine uint
+ DeclCol uint
+}
+
+// This is the main entry point for collection of raw material to
+// drive generation of DWARF "inlined subroutine" DIEs. See proposal
+// 22080 for more details and background info.
+func assembleInlines(fnsym *obj.LSym, fn *Node, dwVars []*dwarf.Var) dwarf.InlCalls {
+ var inlcalls dwarf.InlCalls
+
+ if Debug_gendwarfinl != 0 {
+ Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+ }
+
+ // This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
+ imap := make(map[int]int)
+
+ // Walk progs to build up the InlCalls data structure
+ var prevpos src.XPos
+ for p := fnsym.Func.Text; p != nil; p = p.Link {
+ if p.Pos == prevpos {
+ continue
+ }
+ ii := posInlIndex(p.Pos)
+ if ii >= 0 {
+ insertInlCall(&inlcalls, ii, imap)
+ }
+ prevpos = p.Pos
+ }
+
+ // This is used to partition DWARF vars by inline index. Vars not
+ // produced by the inliner will wind up in the vmap[0] entry.
+ vmap := make(map[int32][]*dwarf.Var)
+
+ // Now walk the dwarf vars and partition them based on whether they
+ // were produced by the inliner (dwv.InlIndex > 0) or were original
+ // vars/params from the function (dwv.InlIndex == 0).
+ for _, dwv := range dwVars {
+
+ vmap[dwv.InlIndex] = append(vmap[dwv.InlIndex], dwv)
+
+ // Zero index => var was not produced by an inline
+ if dwv.InlIndex == 0 {
+ continue
+ }
+
+ // Look up index in our map, then tack the var in question
+ // onto the vars list for the correct inlined call.
+ ii := int(dwv.InlIndex) - 1
+ idx, ok := imap[ii]
+ if !ok {
+ // We can occasionally encounter a var produced by the
+ // inliner for which there is no remaining prog; add a new
+ // entry to the call list in this scenario.
+ idx = insertInlCall(&inlcalls, ii, imap)
+ }
+ inlcalls.Calls[idx].InlVars =
+ append(inlcalls.Calls[idx].InlVars, dwv)
+ }
+
+ // Post process the map above to assign child indices to vars. For
+ // variables that weren't produced by an inline, sort them
+ // according to class and name and assign indices that way. For
+ // vars produced by an inline, assign child index by looking up
+ // the var name in the origin pre-optimization dcl list for the
+ // inlined function.
+ for ii, sl := range vmap {
+ if ii == 0 {
+ sort.Sort(byClassThenName(sl))
+ for j := 0; j < len(sl); j++ {
+ sl[j].ChildIndex = int32(j)
+ }
+ } else {
+ // Assign child index based on pre-inlined decls
+ ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
+ dcl, _ := preInliningDcls(ifnlsym)
+ m := make(map[varPos]int)
+ for i := 0; i < len(dcl); i++ {
+ n := dcl[i]
+ pos := Ctxt.InnermostPos(n.Pos)
+ vp := varPos{
+ DeclFile: pos.Base().SymFilename(),
+ DeclLine: pos.Line(),
+ DeclCol: pos.Col(),
+ }
+ m[vp] = i
+ }
+ for j := 0; j < len(sl); j++ {
+ vp := varPos{
+ DeclFile: sl[j].DeclFile,
+ DeclLine: sl[j].DeclLine,
+ DeclCol: sl[j].DeclCol,
+ }
+ if idx, found := m[vp]; found {
+ sl[j].ChildIndex = int32(idx)
+ } else {
+ Fatalf("unexpected: can't find var %s in preInliningDcls for %v\n", sl[j].Name, Ctxt.InlTree.InlinedFunction(int(ii-1)))
+ }
+ }
+ }
+ }
+
+ // Make a second pass through the progs to compute PC ranges
+ // for the various inlined calls.
+ curii := -1
+ var crange *dwarf.Range
+ var prevp *obj.Prog
+ for p := fnsym.Func.Text; p != nil; prevp, p = p, p.Link {
+ if prevp != nil && p.Pos == prevp.Pos {
+ continue
+ }
+ ii := posInlIndex(p.Pos)
+ if ii == curii {
+ continue
+ } else {
+ // Close out the current range
+ endRange(crange, prevp)
+
+ // Begin new range
+ crange = beginRange(inlcalls.Calls, p, ii, imap)
+ curii = ii
+ }
+ }
+ if prevp != nil {
+ endRange(crange, prevp)
+ }
+
+ // Debugging
+ if Debug_gendwarfinl != 0 {
+ dumpInlCalls(inlcalls)
+ dumpInlVars(dwVars)
+ }
+
+ return inlcalls
+}
+
+// Secondary hook for DWARF inlined subroutine generation. This is called
+// late in the compilation when it is determined that we need an
+// abstract function DIE for an inlined routine imported from a
+// previously compiled package.
+func genAbstractFunc(fn *obj.LSym) {
+ ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
+ if ifn == nil {
+ Ctxt.Diag("failed to locate precursor fn for %v", fn)
+ return
+ }
+ if Debug_gendwarfinl != 0 {
+ Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+ }
+ Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
+}
+
+func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
+ callIdx, found := imap[inlIdx]
+ if found {
+ return callIdx
+ }
+
+ // Haven't seen this inline yet. Visit parent of inline if there
+ // is one. We do this first so that parents appear before their
+ // children in the resulting table.
+ parCallIdx := -1
+ parInlIdx := Ctxt.InlTree.Parent(inlIdx)
+ if parInlIdx >= 0 {
+ parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
+ }
+
+ // Create new entry for this inline
+ inlinedFn := Ctxt.InlTree.InlinedFunction(int(inlIdx))
+ callXPos := Ctxt.InlTree.CallPos(int(inlIdx))
+ absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+ pb := Ctxt.PosTable.Pos(callXPos).Base()
+ callFileSym := Ctxt.Lookup(pb.SymFilename())
+ ic := dwarf.InlCall{
+ InlIndex: inlIdx,
+ CallFile: callFileSym,
+ CallLine: uint32(callXPos.Line()),
+ AbsFunSym: absFnSym,
+ Root: parCallIdx == -1,
+ }
+ dwcalls.Calls = append(dwcalls.Calls, ic)
+ callIdx = len(dwcalls.Calls) - 1
+ imap[inlIdx] = callIdx
+
+ if parCallIdx != -1 {
+ // Add this inline to parent's child list
+ dwcalls.Calls[parCallIdx].Children = append(dwcalls.Calls[parCallIdx].Children, callIdx)
+ }
+
+ return callIdx
+}
+
+// Given a src.XPos, return its associated inlining index if it
+// corresponds to something created as a result of an inline, or -1 if
+// there is no inline info. Note that the index returned will refer to
+// the deepest call in the inlined stack, e.g. if you have "A calls B
+// calls C calls D" and all three callees are inlined (B, C, and D),
+// the index for a node from the inlined body of D will refer to the
+// call to D from C. Whew.
+func posInlIndex(xpos src.XPos) int {
+ pos := Ctxt.PosTable.Pos(xpos)
+ if b := pos.Base(); b != nil {
+ ii := b.InliningIndex()
+ if ii >= 0 {
+ return ii
+ }
+ }
+ return -1
+}
+
+func endRange(crange *dwarf.Range, p *obj.Prog) {
+ if crange == nil {
+ return
+ }
+ crange.End = p.Pc
+}
+
+func beginRange(calls []dwarf.InlCall, p *obj.Prog, ii int, imap map[int]int) *dwarf.Range {
+ if ii == -1 {
+ return nil
+ }
+ callIdx, found := imap[ii]
+ if !found {
+ Fatalf("internal error: can't find inlIndex %d in imap for prog at %d\n", ii, p.Pc)
+ }
+ call := &calls[callIdx]
+
+ // Set up range and append to correct inlined call
+ call.Ranges = append(call.Ranges, dwarf.Range{Start: p.Pc, End: -1})
+ return &call.Ranges[len(call.Ranges)-1]
+}
+
+func cmpDwarfVar(a, b *dwarf.Var) bool {
+ // named before artificial
+ aart := 0
+ if strings.HasPrefix(a.Name, "~r") {
+ aart = 1
+ }
+ bart := 0
+ if strings.HasPrefix(b.Name, "~r") {
+ bart = 1
+ }
+ if aart != bart {
+ return aart < bart
+ }
+
+ // otherwise sort by name
+ return a.Name < b.Name
+}
+
+// byClassThenName implements sort.Interface for []*dwarf.Var using cmpDwarfVar.
+type byClassThenName []*dwarf.Var
+
+func (s byClassThenName) Len() int { return len(s) }
+func (s byClassThenName) Less(i, j int) bool { return cmpDwarfVar(s[i], s[j]) }
+func (s byClassThenName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
+ for i := 0; i < ilevel; i += 1 {
+ Ctxt.Logf(" ")
+ }
+ ic := inlcalls.Calls[idx]
+ callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+ Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+ for _, f := range ic.InlVars {
+ Ctxt.Logf(" %v", f.Name)
+ }
+ Ctxt.Logf(" ) C: (")
+ for _, k := range ic.Children {
+ Ctxt.Logf(" %v", k)
+ }
+ Ctxt.Logf(" ) R:")
+ for _, r := range ic.Ranges {
+ Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+ }
+ Ctxt.Logf("\n")
+ for _, k := range ic.Children {
+ dumpInlCall(inlcalls, k, ilevel+1)
+ }
+
+}
+
+func dumpInlCalls(inlcalls dwarf.InlCalls) {
+ n := len(inlcalls.Calls)
+ for k := 0; k < n; k += 1 {
+ if inlcalls.Calls[k].Root {
+ dumpInlCall(inlcalls, k, 0)
+ }
+ }
+}
+
+func dumpInlVars(dwvars []*dwarf.Var) {
+ for i, dwv := range dwvars {
+ typ := "local"
+ if dwv.Abbrev == dwarf.DW_ABRV_PARAM_LOCLIST || dwv.Abbrev == dwarf.DW_ABRV_PARAM {
+ typ = "param"
+ }
+ Ctxt.Logf("V%d: %s CI:%d II:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, typ)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index 87a5b7f29f5..03c0adafd51 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -129,20 +129,17 @@ func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
min = v.visitcodelist(n.Nbody, min)
min = v.visitcodelist(n.Rlist, min)
- if n.Op == OCALLFUNC || n.Op == OCALLMETH {
- fn := n.Left
- if n.Op == OCALLMETH {
- fn = asNode(n.Left.Sym.Def)
- }
+ switch n.Op {
+ case OCALLFUNC, OCALLMETH:
+ fn := asNode(n.Left.Type.Nname())
if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
m := v.visit(fn.Name.Defn)
if m < min {
min = m
}
}
- }
- if n.Op == OCLOSURE {
+ case OCLOSURE:
m := v.visit(n.Func.Closure)
if m < min {
min = m
@@ -176,12 +173,6 @@ func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
// then the value can stay on the stack. If the value new(T) does
// not escape, then new(T) can be rewritten into a stack allocation.
// The same is true of slice literals.
-//
-// If optimizations are disabled (-N), this code is not used.
-// Instead, the compiler assumes that any value whose address
-// is taken without being immediately dereferenced
-// needs to be moved to the heap, and new(T) and slice
-// literals are always real allocations.
func escapes(all []*Node) {
visitBottomUp(all, escAnalyze)
@@ -205,9 +196,7 @@ const (
// allowed level when a loop is encountered. Using -2 suffices to
// pass all the tests we have written so far, which we assume matches
// the level of complexity we want the escape analysis code to handle.
-const (
- MinLevel = -2
-)
+const MinLevel = -2
// A Level encodes the reference state and context applied to
// (stack, heap) allocated memory.
@@ -679,7 +668,7 @@ func (e *EscState) esc(n *Node, parent *Node) {
// Big stuff escapes unconditionally
// "Big" conditions that were scattered around in walk have been gathered here
if n.Esc != EscHeap && n.Type != nil &&
- (n.Type.Width > MaxStackVarSize ||
+ (n.Type.Width > maxStackVarSize ||
(n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= 1<<16 ||
n.Op == OMAKESLICE && !isSmallMakeSlice(n)) {
if Debug['m'] > 2 {
@@ -691,7 +680,18 @@ func (e *EscState) esc(n *Node, parent *Node) {
}
e.esc(n.Left, n)
+
+ if n.Op == ORANGE {
+ // ORANGE node's Right is evaluated before the loop
+ e.loopdepth--
+ }
+
e.esc(n.Right, n)
+
+ if n.Op == ORANGE {
+ e.loopdepth++
+ }
+
e.esclist(n.Nbody, n)
e.esclist(n.List, n)
e.esclist(n.Rlist, n)
@@ -848,7 +848,7 @@ func (e *EscState) esc(n *Node, parent *Node) {
case ORETURN:
retList := n.List
- if retList.Len() == 1 && Curfn.Type.Results().NumFields() > 1 {
+ if retList.Len() == 1 && Curfn.Type.NumResults() > 1 {
// OAS2FUNC in disguise
// esccall already done on n.List.First()
// tie e.nodeEscState(n.List.First()).Retval to Curfn.Func.Dcl PPARAMOUT's
@@ -1279,16 +1279,14 @@ func parsetag(note string) uint16 {
// to the second output (and if there are more than two outputs, there is no flow to those.)
func describeEscape(em uint16) string {
var s string
- if em&EscMask == EscUnknown {
+ switch em & EscMask {
+ case EscUnknown:
s = "EscUnknown"
- }
- if em&EscMask == EscNone {
+ case EscNone:
s = "EscNone"
- }
- if em&EscMask == EscHeap {
+ case EscHeap:
s = "EscHeap"
- }
- if em&EscMask == EscReturn {
+ case EscReturn:
s = "EscReturn"
}
if em&EscContentEscapes != 0 {
@@ -1554,20 +1552,20 @@ func (e *EscState) esccall(call *Node, parent *Node) {
call.Right = arg
}
e.escassignWhyWhere(n, arg, "arg to recursive call", call) // TODO this message needs help.
- if arg != args[0] {
- // "..." arguments are untracked
- for _, a := range args {
- if Debug['m'] > 3 {
- fmt.Printf("%v::esccall:: ... <- %S, untracked\n", linestr(lineno), a)
- }
- e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call)
- }
- // No more PPARAM processing, but keep
- // going for PPARAMOUT.
- args = nil
+ if arg == args[0] {
+ args = args[1:]
continue
}
- args = args[1:]
+ // "..." arguments are untracked
+ for _, a := range args {
+ if Debug['m'] > 3 {
+ fmt.Printf("%v::esccall:: ... <- %S, untracked\n", linestr(lineno), a)
+ }
+ e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call)
+ }
+ // No more PPARAM processing, but keep
+ // going for PPARAMOUT.
+ args = nil
case PPARAMOUT:
cE.Retval.Append(n)
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index a92a41c5ceb..c5d5c52205d 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -9,6 +9,7 @@ import (
"bytes"
"cmd/compile/internal/types"
"cmd/internal/bio"
+ "cmd/internal/src"
"fmt"
"unicode"
"unicode/utf8"
@@ -18,7 +19,7 @@ var (
Debug_export int // if set, print debugging information about export data
)
-func exportf(format string, args ...interface{}) {
+func exportf(bout *bio.Writer, format string, args ...interface{}) {
fmt.Fprintf(bout, format, args...)
if Debug_export != 0 {
fmt.Printf(format, args...)
@@ -82,7 +83,7 @@ func autoexport(n *Node, ctxt Class) {
if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
return
}
- if n.Type != nil && n.Type.IsKind(TFUNC) && n.Type.Recv() != nil { // method
+ if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
return
}
@@ -111,10 +112,10 @@ func reexportdep(n *Node) {
switch n.Op {
case ONAME:
switch n.Class() {
- // methods will be printed along with their type
- // nodes for T.Method expressions
case PFUNC:
- if n.Left != nil && n.Left.Op == OTYPE {
+ // methods will be printed along with their type
+ // nodes for T.Method expressions
+ if n.isMethodExpression() {
break
}
@@ -221,14 +222,14 @@ func (x methodbyname) Len() int { return len(x) }
func (x methodbyname) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methodbyname) Less(i, j int) bool { return x[i].Sym.Name < x[j].Sym.Name }
-func dumpexport() {
+func dumpexport(bout *bio.Writer) {
if buildid != "" {
- exportf("build id %q\n", buildid)
+ exportf(bout, "build id %q\n", buildid)
}
size := 0 // size of export section without enclosing markers
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
- exportf("\n$$B\n") // indicate binary export format
+ exportf(bout, "\n$$B\n") // indicate binary export format
if debugFormat {
// save a copy of the export data
var copy bytes.Buffer
@@ -252,7 +253,7 @@ func dumpexport() {
} else {
size = export(bout.Writer, Debug_export != 0)
}
- exportf("\n$$\n")
+ exportf(bout, "\n$$\n")
if Debug_export != 0 {
fmt.Printf("export data size = %d bytes\n", size)
@@ -280,12 +281,12 @@ func importsym(pkg *types.Pkg, s *types.Sym, op Op) {
// pkgtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// pkg is the package being imported
-func pkgtype(pkg *types.Pkg, s *types.Sym) *types.Type {
+func pkgtype(pos src.XPos, pkg *types.Pkg, s *types.Sym) *types.Type {
importsym(pkg, s, OTYPE)
if asNode(s.Def) == nil || asNode(s.Def).Op != OTYPE {
t := types.New(TFORW)
t.Sym = s
- s.Def = asTypesNode(typenod(t))
+ s.Def = asTypesNode(typenodl(pos, t))
asNode(s.Def).Name = new(Name)
}
@@ -326,7 +327,7 @@ func importconst(pkg *types.Pkg, s *types.Sym, t *types.Type, n *Node) {
// importvar declares symbol s as an imported variable with type t.
// pkg is the package being imported
-func importvar(pkg *types.Pkg, s *types.Sym, t *types.Type) {
+func importvar(pos src.XPos, pkg *types.Pkg, s *types.Sym, t *types.Type) {
importsym(pkg, s, ONAME)
if asNode(s.Def) != nil && asNode(s.Def).Op == ONAME {
if eqtype(t, asNode(s.Def).Type) {
@@ -335,7 +336,7 @@ func importvar(pkg *types.Pkg, s *types.Sym, t *types.Type) {
yyerror("inconsistent definition for var %v during import\n\t%v (in %q)\n\t%v (in %q)", s, asNode(s.Def).Type, s.Importdef.Path, t, pkg.Path)
}
- n := newname(s)
+ n := newnamel(pos, s)
s.Importdef = pkg
n.Type = t
declare(n, PEXTERN)
@@ -347,7 +348,7 @@ func importvar(pkg *types.Pkg, s *types.Sym, t *types.Type) {
// importalias declares symbol s as an imported type alias with type t.
// pkg is the package being imported
-func importalias(pkg *types.Pkg, s *types.Sym, t *types.Type) {
+func importalias(pos src.XPos, pkg *types.Pkg, s *types.Sym, t *types.Type) {
importsym(pkg, s, OTYPE)
if asNode(s.Def) != nil && asNode(s.Def).Op == OTYPE {
if eqtype(t, asNode(s.Def).Type) {
@@ -356,7 +357,7 @@ func importalias(pkg *types.Pkg, s *types.Sym, t *types.Type) {
yyerror("inconsistent definition for type alias %v during import\n\t%v (in %q)\n\t%v (in %q)", s, asNode(s.Def).Type, s.Importdef.Path, t, pkg.Path)
}
- n := newname(s)
+ n := newnamel(pos, s)
n.Op = OTYPE
s.Importdef = pkg
n.Type = t
@@ -386,10 +387,10 @@ func dumpasmhdr() {
if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
break
}
- fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width))
- for _, t := range t.Fields().Slice() {
- if !t.Sym.IsBlank() {
- fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Offset))
+ fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
+ for _, f := range t.Fields().Slice() {
+ if !f.Sym.IsBlank() {
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
}
}
}
diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
index f906f3a228c..4cb9532e556 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -4,7 +4,10 @@
package gc
-import "testing"
+import (
+ "math"
+ "testing"
+)
// For GO386=387, make sure fucomi* opcodes are not used
// for comparison operations.
@@ -31,6 +34,107 @@ func TestFloatCompare(t *testing.T) {
}
}
+func TestFloatCompareFolded(t *testing.T) {
+ // float64 comparisons
+ d1, d3, d5, d9 := float64(1), float64(3), float64(5), float64(9)
+ if d3 == d5 {
+ t.Errorf("d3 == d5 returned true")
+ }
+ if d3 != d3 {
+ t.Errorf("d3 != d3 returned true")
+ }
+ if d3 > d5 {
+ t.Errorf("d3 > d5 returned true")
+ }
+ if d3 >= d9 {
+ t.Errorf("d3 >= d9 returned true")
+ }
+ if d5 < d1 {
+ t.Errorf("d5 < d1 returned true")
+ }
+ if d9 <= d1 {
+ t.Errorf("d9 <= d1 returned true")
+ }
+ if math.NaN() == math.NaN() {
+ t.Errorf("math.NaN() == math.NaN() returned true")
+ }
+ if math.NaN() >= math.NaN() {
+ t.Errorf("math.NaN() >= math.NaN() returned true")
+ }
+ if math.NaN() <= math.NaN() {
+ t.Errorf("math.NaN() <= math.NaN() returned true")
+ }
+ if math.Copysign(math.NaN(), -1) < math.NaN() {
+ t.Errorf("math.Copysign(math.NaN(), -1) < math.NaN() returned true")
+ }
+ if math.Inf(1) != math.Inf(1) {
+ t.Errorf("math.Inf(1) != math.Inf(1) returned true")
+ }
+ if math.Inf(-1) != math.Inf(-1) {
+ t.Errorf("math.Inf(-1) != math.Inf(-1) returned true")
+ }
+ if math.Copysign(0, -1) != 0 {
+ t.Errorf("math.Copysign(0, -1) != 0 returned true")
+ }
+ if math.Copysign(0, -1) < 0 {
+ t.Errorf("math.Copysign(0, -1) < 0 returned true")
+ }
+ if 0 > math.Copysign(0, -1) {
+ t.Errorf("0 > math.Copysign(0, -1) returned true")
+ }
+
+ // float32 comparisons
+ s1, s3, s5, s9 := float32(1), float32(3), float32(5), float32(9)
+ if s3 == s5 {
+ t.Errorf("s3 == s5 returned true")
+ }
+ if s3 != s3 {
+ t.Errorf("s3 != s3 returned true")
+ }
+ if s3 > s5 {
+ t.Errorf("s3 > s5 returned true")
+ }
+ if s3 >= s9 {
+ t.Errorf("s3 >= s9 returned true")
+ }
+ if s5 < s1 {
+ t.Errorf("s5 < s1 returned true")
+ }
+ if s9 <= s1 {
+ t.Errorf("s9 <= s1 returned true")
+ }
+ sPosNaN, sNegNaN := float32(math.NaN()), float32(math.Copysign(math.NaN(), -1))
+ if sPosNaN == sPosNaN {
+ t.Errorf("sPosNaN == sPosNaN returned true")
+ }
+ if sPosNaN >= sPosNaN {
+ t.Errorf("sPosNaN >= sPosNaN returned true")
+ }
+ if sPosNaN <= sPosNaN {
+ t.Errorf("sPosNaN <= sPosNaN returned true")
+ }
+ if sNegNaN < sPosNaN {
+ t.Errorf("sNegNaN < sPosNaN returned true")
+ }
+ sPosInf, sNegInf := float32(math.Inf(1)), float32(math.Inf(-1))
+ if sPosInf != sPosInf {
+ t.Errorf("sPosInf != sPosInf returned true")
+ }
+ if sNegInf != sNegInf {
+ t.Errorf("sNegInf != sNegInf returned true")
+ }
+ sNegZero := float32(math.Copysign(0, -1))
+ if sNegZero != 0 {
+ t.Errorf("sNegZero != 0 returned true")
+ }
+ if sNegZero < 0 {
+ t.Errorf("sNegZero < 0 returned true")
+ }
+ if 0 > sNegZero {
+ t.Errorf("0 > sNegZero returned true")
+ }
+}
+
// For GO386=387, make sure fucomi* opcodes are not used
// for float->int conversions.
@@ -95,6 +199,16 @@ func cvt12(a float32) uint {
return uint(a)
}
+//go:noinline
+func f2i64p(v float64) *int64 {
+ return ip64(int64(v / 0.1))
+}
+
+//go:noinline
+func ip64(v int64) *int64 {
+ return &v
+}
+
func TestFloatConvert(t *testing.T) {
if got := cvt1(3.5); got != 3 {
t.Errorf("cvt1 got %d, wanted 3", got)
@@ -132,6 +246,120 @@ func TestFloatConvert(t *testing.T) {
if got := cvt12(3.5); got != 3 {
t.Errorf("cvt12 got %d, wanted 3", got)
}
+ if got := *f2i64p(10); got != 100 {
+ t.Errorf("f2i64p got %d, wanted 100", got)
+ }
+}
+
+func TestFloatConvertFolded(t *testing.T) {
+ // Assign constants to variables so that they are (hopefully) constant folded
+ // by the SSA backend rather than the frontend.
+ u64, u32, u16, u8 := uint64(1<<63), uint32(1<<31), uint16(1<<15), uint8(1<<7)
+ i64, i32, i16, i8 := int64(-1<<63), int32(-1<<31), int16(-1<<15), int8(-1<<7)
+ du64, du32, du16, du8 := float64(1<<63), float64(1<<31), float64(1<<15), float64(1<<7)
+ di64, di32, di16, di8 := float64(-1<<63), float64(-1<<31), float64(-1<<15), float64(-1<<7)
+ su64, su32, su16, su8 := float32(1<<63), float32(1<<31), float32(1<<15), float32(1<<7)
+ si64, si32, si16, si8 := float32(-1<<63), float32(-1<<31), float32(-1<<15), float32(-1<<7)
+
+ // integer to float
+ if float64(u64) != du64 {
+ t.Errorf("float64(u64) != du64")
+ }
+ if float64(u32) != du32 {
+ t.Errorf("float64(u32) != du32")
+ }
+ if float64(u16) != du16 {
+ t.Errorf("float64(u16) != du16")
+ }
+ if float64(u8) != du8 {
+ t.Errorf("float64(u8) != du8")
+ }
+ if float64(i64) != di64 {
+ t.Errorf("float64(i64) != di64")
+ }
+ if float64(i32) != di32 {
+ t.Errorf("float64(i32) != di32")
+ }
+ if float64(i16) != di16 {
+ t.Errorf("float64(i16) != di16")
+ }
+ if float64(i8) != di8 {
+ t.Errorf("float64(i8) != di8")
+ }
+ if float32(u64) != su64 {
+ t.Errorf("float32(u64) != su64")
+ }
+ if float32(u32) != su32 {
+ t.Errorf("float32(u32) != su32")
+ }
+ if float32(u16) != su16 {
+ t.Errorf("float32(u16) != su16")
+ }
+ if float32(u8) != su8 {
+ t.Errorf("float32(u8) != su8")
+ }
+ if float32(i64) != si64 {
+ t.Errorf("float32(i64) != si64")
+ }
+ if float32(i32) != si32 {
+ t.Errorf("float32(i32) != si32")
+ }
+ if float32(i16) != si16 {
+ t.Errorf("float32(i16) != si16")
+ }
+ if float32(i8) != si8 {
+ t.Errorf("float32(i8) != si8")
+ }
+
+ // float to integer
+ if uint64(du64) != u64 {
+ t.Errorf("uint64(du64) != u64")
+ }
+ if uint32(du32) != u32 {
+ t.Errorf("uint32(du32) != u32")
+ }
+ if uint16(du16) != u16 {
+ t.Errorf("uint16(du16) != u16")
+ }
+ if uint8(du8) != u8 {
+ t.Errorf("uint8(du8) != u8")
+ }
+ if int64(di64) != i64 {
+ t.Errorf("int64(di64) != i64")
+ }
+ if int32(di32) != i32 {
+ t.Errorf("int32(di32) != i32")
+ }
+ if int16(di16) != i16 {
+ t.Errorf("int16(di16) != i16")
+ }
+ if int8(di8) != i8 {
+ t.Errorf("int8(di8) != i8")
+ }
+ if uint64(su64) != u64 {
+ t.Errorf("uint64(su64) != u64")
+ }
+ if uint32(su32) != u32 {
+ t.Errorf("uint32(su32) != u32")
+ }
+ if uint16(su16) != u16 {
+ t.Errorf("uint16(su16) != u16")
+ }
+ if uint8(su8) != u8 {
+ t.Errorf("uint8(su8) != u8")
+ }
+ if int64(si64) != i64 {
+ t.Errorf("int64(si64) != i64")
+ }
+ if int32(si32) != i32 {
+ t.Errorf("int32(si32) != i32")
+ }
+ if int16(si16) != i16 {
+ t.Errorf("int16(si16) != i16")
+ }
+ if int8(si8) != i8 {
+ t.Errorf("int8(si8) != i8")
+ }
}
var sinkFloat float64
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index 2f56d8ab51c..4b2fdb0dca7 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -204,11 +204,6 @@ var goopnames = []string{
OSUB: "-",
OSWITCH: "switch",
OXOR: "^",
- OXFALL: "fallthrough",
-}
-
-func (o Op) String() string {
- return fmt.Sprint(o)
}
func (o Op) GoString() string {
@@ -227,28 +222,14 @@ func (o Op) format(s fmt.State, verb rune, mode fmtMode) {
func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) {
if flag&FmtSharp != 0 || mode != FDbg {
- if o >= 0 && int(o) < len(goopnames) && goopnames[o] != "" {
+ if int(o) < len(goopnames) && goopnames[o] != "" {
fmt.Fprint(s, goopnames[o])
return
}
}
- if o >= 0 && int(o) < len(opnames) && opnames[o] != "" {
- fmt.Fprint(s, opnames[o])
- return
- }
-
- fmt.Fprintf(s, "O-%d", int(o))
-}
-
-var classnames = []string{
- "Pxxx",
- "PEXTERN",
- "PAUTO",
- "PAUTOHEAP",
- "PPARAM",
- "PPARAMOUT",
- "PFUNC",
+ // 'o.String()' instead of just 'o' to avoid infinite recursion
+ fmt.Fprint(s, o.String())
}
type (
@@ -448,11 +429,7 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
}
if n.Class() != 0 {
- if int(n.Class()) < len(classnames) {
- fmt.Fprintf(s, " class(%s)", classnames[n.Class()])
- } else {
- fmt.Fprintf(s, " class(%d?)", n.Class())
- }
+ fmt.Fprintf(s, " class(%v)", n.Class())
}
if n.Colas() {
@@ -814,7 +791,7 @@ func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
}
buf = append(buf, tmodeString(t.Params(), mode, depth)...)
- switch t.Results().NumFields() {
+ switch t.NumResults() {
case 0:
// nothing to do
@@ -1080,11 +1057,7 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
}
mode.Fprintf(s, ": %v", n.Nbody)
- case OBREAK,
- OCONTINUE,
- OGOTO,
- OFALL,
- OXFALL:
+ case OBREAK, OCONTINUE, OGOTO, OFALL:
if n.Left != nil {
mode.Fprintf(s, "%#v %v", n.Op, n.Left)
} else {
@@ -1219,7 +1192,6 @@ var opprec = []int{
OSELECT: -1,
OSWITCH: -1,
OXCASE: -1,
- OXFALL: -1,
OEND: 0,
}
@@ -1543,13 +1515,11 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
n.Right.exprfmt(s, nprec+1, mode)
case OADDSTR:
- i := 0
- for _, n1 := range n.List.Slice() {
+ for i, n1 := range n.List.Slice() {
if i != 0 {
fmt.Fprint(s, " + ")
}
n1.exprfmt(s, nprec, mode)
- i++
}
case OCMPSTR, OCMPIFACE:
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
index 626d282c184..f9b4584cf6b 100644
--- a/src/cmd/compile/internal/gc/gen.go
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -11,7 +11,7 @@ import (
"strconv"
)
-func Sysfunc(name string) *obj.LSym {
+func sysfunc(name string) *obj.LSym {
return Runtimepkg.Lookup(name).Linksym()
}
@@ -39,7 +39,7 @@ func autotmpname(n int) string {
}
// make a new Node off the books
-func tempnamel(pos src.XPos, curfn *Node, nn *Node, t *types.Type) {
+func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
if curfn == nil {
Fatalf("no curfn for tempname")
}
@@ -61,23 +61,15 @@ func tempnamel(pos src.XPos, curfn *Node, nn *Node, t *types.Type) {
n.SetClass(PAUTO)
n.Esc = EscNever
n.Name.Curfn = curfn
+ n.Name.SetUsed(true)
n.Name.SetAutoTemp(true)
curfn.Func.Dcl = append(curfn.Func.Dcl, n)
dowidth(t)
- *nn = *n
+
+ return n.Orig
}
func temp(t *types.Type) *Node {
- var n Node
- tempnamel(lineno, Curfn, &n, t)
- asNode(n.Sym.Def).Name.SetUsed(true)
- return n.Orig
-}
-
-func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
- var n Node
- tempnamel(pos, curfn, &n, t)
- asNode(n.Sym.Def).Name.SetUsed(true)
- return n.Orig
+ return tempAt(lineno, Curfn, t)
}
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index b1ead93c346..dc94cf4f98b 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -7,7 +7,6 @@ package gc
import (
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
- "cmd/internal/bio"
"cmd/internal/obj"
"cmd/internal/src"
"sync"
@@ -15,7 +14,7 @@ import (
const (
BADWIDTH = types.BADWIDTH
- MaxStackVarSize = 10 * 1024 * 1024
+ maxStackVarSize = 10 * 1024 * 1024
)
// isRuntimePkg reports whether p is package runtime.
@@ -85,8 +84,6 @@ var outfile string
var linkobj string
var dolinkobj bool
-var bout *bio.Writer
-
// nerrors is the number of compiler errors reported
// since the last call to saveerrors.
var nerrors int
@@ -201,7 +198,7 @@ var compiling_runtime bool
// Compiling the standard library
var compiling_std bool
-var compiling_wrappers int
+var compiling_wrappers bool
var use_writebarrier bool
@@ -222,6 +219,11 @@ var instrumenting bool
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
+// Controls generation of DWARF inlined instance records. Zero
+// disables, 1 emits inlined routines but suppresses var info,
+// and 2 emits inlined routines with tracking of formals/locals.
+var genDwarfInline int
+
var debuglive int
var Ctxt *obj.Link
@@ -241,9 +243,10 @@ var autogeneratedPos src.XPos
type Arch struct {
LinkArch *obj.LinkArch
- REGSP int
- MAXWIDTH int64
- Use387 bool // should 386 backend use 387 FP instructions instead of sse2.
+ REGSP int
+ MAXWIDTH int64
+ Use387 bool // should 386 backend use 387 FP instructions instead of sse2.
+ SoftFloat bool
PadFrame func(int64) int64
ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
@@ -290,6 +293,7 @@ var (
goschedguarded,
writeBarrier,
writebarrierptr,
+ gcWriteBarrier,
typedmemmove,
typedmemclr,
Udiv *obj.LSym
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index b25709b9999..d074900d983 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -37,7 +37,7 @@ import (
"cmd/internal/src"
)
-var sharedProgArray *[10000]obj.Prog = new([10000]obj.Prog) // *T instead of T to work around issue 19839
+var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
// Progs accumulates Progs for a function and converts them into machine code.
type Progs struct {
@@ -70,13 +70,13 @@ func newProgs(fn *Node, worker int) *Progs {
}
func (pp *Progs) NewProg() *obj.Prog {
+ var p *obj.Prog
if pp.cacheidx < len(pp.progcache) {
- p := &pp.progcache[pp.cacheidx]
- p.Ctxt = Ctxt
+ p = &pp.progcache[pp.cacheidx]
pp.cacheidx++
- return p
+ } else {
+ p = new(obj.Prog)
}
- p := new(obj.Prog)
p.Ctxt = Ctxt
return p
}
@@ -84,7 +84,7 @@ func (pp *Progs) NewProg() *obj.Prog {
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
- obj.Flushplist(Ctxt, plist, pp.NewProg)
+ obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
}
// Free clears pp and any associated resources.
diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go
index 93ae2410cd9..180cbcfda2b 100644
--- a/src/cmd/compile/internal/gc/init.go
+++ b/src/cmd/compile/internal/gc/init.go
@@ -198,7 +198,7 @@ func fninit(n []*Node) {
exportsym(fn.Func.Nname)
fn.Nbody.Set(r)
- funcbody(fn)
+ funcbody()
Curfn = fn
fn = typecheck(fn, Etop)
@@ -208,8 +208,7 @@ func fninit(n []*Node) {
}
func (n *Node) checkInitFuncSignature() {
- ft := n.Type.FuncType()
- if ft.Receiver.Fields().Len()+ft.Params.Fields().Len()+ft.Results.Fields().Len() > 0 {
+ if n.Type.NumRecvs()+n.Type.NumParams()+n.Type.NumResults() > 0 {
Fatalf("init function cannot have receiver, params, or results: %v (%v)", n, n.Type)
}
}
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index dfa13e3c3b3..0e8ef196af7 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -8,29 +8,34 @@
// expand calls to inlinable functions.
//
// The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1,
-// making 1 the default and -l disable. -ll and more is useful to flush out bugs.
-// These additional levels (beyond -l) may be buggy and are not supported.
+// making 1 the default and -l disable. Additional levels (beyond -l) may be buggy and
+// are not supported.
// 0: disabled
// 1: 80-nodes leaf functions, oneliners, lazy typechecking (default)
-// 2: early typechecking of all imported bodies
+// 2: (unassigned)
// 3: allow variadic functions
-// 4: allow non-leaf functions , (breaks runtime.Caller)
+// 4: allow non-leaf functions
//
-// At some point this may get another default and become switch-offable with -N.
+// At some point this may get another default and become switch-offable with -N.
//
-// The debug['m'] flag enables diagnostic output. a single -m is useful for verifying
-// which calls get inlined or not, more is for debugging, and may go away at any point.
+// The -d typcheckinl flag enables early typechecking of all imported bodies,
+// which is useful to flush out bugs.
+//
+// The debug['m'] flag enables diagnostic output. a single -m is useful for verifying
+// which calls get inlined or not, more is for debugging, and may go away at any point.
//
// TODO:
// - inline functions with ... args
-// - handle T.meth(f()) with func f() (t T, arg, arg, )
package gc
import (
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
"fmt"
+ "sort"
+ "strings"
)
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
@@ -118,6 +123,15 @@ func caninl(fn *Node) {
return
}
+ // The nowritebarrierrec checker currently works at function
+ // granularity, so inlining yeswritebarrierrec functions can
+ // confuse it (#22342). As a workaround, disallow inlining
+ // them for now.
+ if fn.Func.Pragma&Yeswritebarrierrec != 0 {
+ reason = "marked go:yeswritebarrierrec"
+ return
+ }
+
// If fn has no body (is defined outside of Go), cannot inline it.
if fn.Nbody.Len() == 0 {
reason = "no function body"
@@ -150,6 +164,12 @@ func caninl(fn *Node) {
return
}
+ n := fn.Func.Nname
+ if n.Func.InlinabilityChecked() {
+ return
+ }
+ defer n.Func.SetInlinabilityChecked(true)
+
const maxBudget = 80
visitor := hairyVisitor{budget: maxBudget}
if visitor.visitList(fn.Nbody) {
@@ -157,15 +177,13 @@ func caninl(fn *Node) {
return
}
if visitor.budget < 0 {
- reason = "function too complex"
+ reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", maxBudget-visitor.budget, maxBudget)
return
}
savefn := Curfn
Curfn = fn
- n := fn.Func.Nname
-
n.Func.Inl.Set(fn.Nbody.Slice())
fn.Nbody.Set(inlcopylist(n.Func.Inl.Slice()))
inldcl := inlcopylist(n.Name.Defn.Func.Dcl)
@@ -185,6 +203,43 @@ func caninl(fn *Node) {
Curfn = savefn
}
+// inlFlood marks n's inline body for export and recursively ensures
+// all called functions are marked too.
+func inlFlood(n *Node) {
+ if n == nil {
+ return
+ }
+ if n.Op != ONAME || n.Class() != PFUNC {
+ Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class())
+ }
+ if n.Func == nil {
+ // TODO(mdempsky): Should init have a Func too?
+ if n.Sym.Name == "init" {
+ return
+ }
+ Fatalf("inlFlood: missing Func on %v", n)
+ }
+ if n.Func.Inl.Len() == 0 {
+ return
+ }
+
+ if n.Func.ExportInline() {
+ return
+ }
+ n.Func.SetExportInline(true)
+
+ typecheckinl(n)
+
+ // Recursively flood any functions called by this one.
+ inspectList(n.Func.Inl, func(n *Node) bool {
+ switch n.Op {
+ case OCALLFUNC, OCALLMETH:
+ inlFlood(asNode(n.Left.Type.Nname()))
+ }
+ return true
+ })
+}
+
// hairyVisitor visits a function body to determine its inlining
// hairiness and whether or not it can be inlined.
type hairyVisitor struct {
@@ -228,8 +283,28 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.budget -= fn.InlCost
break
}
+ if n.Left.Op == OCLOSURE {
+ if fn := inlinableClosure(n.Left); fn != nil {
+ v.budget -= fn.Func.InlCost
+ break
+ }
+ } else if n.Left.Op == ONAME && n.Left.Name != nil && n.Left.Name.Defn != nil {
+ // NB: this case currently cannot trigger since closure definition
+ // prevents inlining
+ // NB: ideally we would also handle captured variables defined as
+ // closures in the outer scope this brings us back to the idea of
+ // function value propagation, which if available would both avoid
+ // the "reassigned" check and neatly handle multiple use cases in a
+ // single code path
+ if d := n.Left.Name.Defn; d.Op == OAS && d.Right.Op == OCLOSURE {
+ if fn := inlinableClosure(d.Right); fn != nil {
+ v.budget -= fn.Func.InlCost
+ break
+ }
+ }
+ }
- if n.isMethodCalledAsFunction() {
+ if n.Left.isMethodExpression() {
if d := asNode(n.Left.Sym.Def); d != nil && d.Func.Inl.Len() != 0 {
v.budget -= d.Func.InlCost
break
@@ -279,6 +354,10 @@ func (v *hairyVisitor) visit(n *Node) bool {
ORETJMP:
v.reason = "unhandled op " + n.Op.String()
return true
+
+ case ODCLCONST, OEMPTY, OFALL, OLABEL:
+ // These nodes don't produce code; omit from inlining budget.
+ return false
}
v.budget--
@@ -293,8 +372,8 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.budget -= 2
}
- if v.budget < 0 {
- v.reason = "function too complex"
+ // When debugging, don't stop early, to get full cost of inlining this function
+ if v.budget < 0 && Debug['m'] < 2 {
return true
}
@@ -517,8 +596,39 @@ func inlnode(n *Node) *Node {
}
if n.Left.Func != nil && n.Left.Func.Inl.Len() != 0 && !isIntrinsicCall(n) { // normal case
n = mkinlcall(n, n.Left, n.Isddd())
- } else if n.isMethodCalledAsFunction() && asNode(n.Left.Sym.Def) != nil {
+ } else if n.Left.isMethodExpression() && asNode(n.Left.Sym.Def) != nil {
n = mkinlcall(n, asNode(n.Left.Sym.Def), n.Isddd())
+ } else if n.Left.Op == OCLOSURE {
+ if f := inlinableClosure(n.Left); f != nil {
+ n = mkinlcall(n, f, n.Isddd())
+ }
+ } else if n.Left.Op == ONAME && n.Left.Name != nil && n.Left.Name.Defn != nil {
+ if d := n.Left.Name.Defn; d.Op == OAS && d.Right.Op == OCLOSURE {
+ if f := inlinableClosure(d.Right); f != nil {
+ // NB: this check is necessary to prevent indirect re-assignment of the variable
+ // having the address taken after the invocation or only used for reads is actually fine
+ // but we have no easy way to distinguish the safe cases
+ if d.Left.Addrtaken() {
+ if Debug['m'] > 1 {
+ fmt.Printf("%v: cannot inline escaping closure variable %v\n", n.Line(), n.Left)
+ }
+ break
+ }
+
+ // ensure the variable is never re-assigned
+ if unsafe, a := reassigned(n.Left); unsafe {
+ if Debug['m'] > 1 {
+ if a != nil {
+ fmt.Printf("%v: cannot inline re-assigned closure variable at %v: %v\n", n.Line(), a.Line(), a)
+ } else {
+ fmt.Printf("%v: cannot inline global closure variable %v\n", n.Line(), n.Left)
+ }
+ }
+ break
+ }
+ n = mkinlcall(n, f, n.Isddd())
+ }
+ }
}
case OCALLMETH:
@@ -542,6 +652,98 @@ func inlnode(n *Node) *Node {
return n
}
+// inlinableClosure takes an OCLOSURE node and follows linkage to the matching ONAME with
+// the inlinable body. Returns nil if the function is not inlinable.
+func inlinableClosure(n *Node) *Node {
+ c := n.Func.Closure
+ caninl(c)
+ f := c.Func.Nname
+ if f == nil || f.Func.Inl.Len() == 0 {
+ return nil
+ }
+ return f
+}
+
+// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
+// indicating whether the name has any assignments other than its declaration.
+// The second return value is the first such assignment encountered in the walk, if any. It is mostly
+// useful for -m output documenting the reason for inhibited optimizations.
+// NB: global variables are always considered to be re-assigned.
+// TODO: handle initial declaration not including an assignment and followed by a single assignment?
+func reassigned(n *Node) (bool, *Node) {
+ if n.Op != ONAME {
+ Fatalf("reassigned %v", n)
+ }
+ // no way to reliably check for no-reassignment of globals, assume it can be
+ if n.Name.Curfn == nil {
+ return true, nil
+ }
+ f := n.Name.Curfn
+ // There just might be a good reason for this although this can be pretty surprising:
+ // local variables inside a closure have Curfn pointing to the OCLOSURE node instead
+ // of the corresponding ODCLFUNC.
+ // We need to walk the function body to check for reassignments so we follow the
+ // linkage to the ODCLFUNC node as that is where body is held.
+ if f.Op == OCLOSURE {
+ f = f.Func.Closure
+ }
+ v := reassignVisitor{name: n}
+ a := v.visitList(f.Nbody)
+ return a != nil, a
+}
+
+type reassignVisitor struct {
+ name *Node
+}
+
+func (v *reassignVisitor) visit(n *Node) *Node {
+ if n == nil {
+ return nil
+ }
+ switch n.Op {
+ case OAS:
+ if n.Left == v.name && n != v.name.Name.Defn {
+ return n
+ }
+ return nil
+ case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE:
+ for _, p := range n.List.Slice() {
+ if p == v.name && n != v.name.Name.Defn {
+ return n
+ }
+ }
+ return nil
+ }
+ if a := v.visit(n.Left); a != nil {
+ return a
+ }
+ if a := v.visit(n.Right); a != nil {
+ return a
+ }
+ if a := v.visitList(n.List); a != nil {
+ return a
+ }
+ if a := v.visitList(n.Rlist); a != nil {
+ return a
+ }
+ if a := v.visitList(n.Ninit); a != nil {
+ return a
+ }
+ if a := v.visitList(n.Nbody); a != nil {
+ return a
+ }
+ return nil
+}
+
+func (v *reassignVisitor) visitList(l Nodes) *Node {
+ for _, n := range l.Slice() {
+ if a := v.visit(n); a != nil {
+ return a
+ }
+ }
+ return nil
+}
+
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(n *Node, fn *Node, isddd bool) *Node {
@@ -580,7 +782,7 @@ var inlgen int
// parameters.
// The result of mkinlcall1 MUST be assigned back to n, e.g.
// n.Left = mkinlcall1(n.Left, fn, isddd)
-func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
+func mkinlcall1(n, fn *Node, isddd bool) *Node {
if fn.Func.Inl.Len() == 0 {
// No inlinable body.
return n
@@ -591,7 +793,7 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
return n
}
- if Debug['l'] < 2 {
+ if Debug_typecheckinl == 0 {
typecheckinl(fn)
}
@@ -607,16 +809,56 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
ninit := n.Ninit
+ // Make temp names to use instead of the originals.
+ inlvars := make(map[*Node]*Node)
+
+ // record formals/locals for later post-processing
+ var inlfvars []*Node
+
// Find declarations corresponding to inlineable body.
var dcl []*Node
if fn.Name.Defn != nil {
dcl = fn.Func.Inldcl.Slice() // local function
+
+ // handle captured variables when inlining closures
+ if c := fn.Name.Defn.Func.Closure; c != nil {
+ for _, v := range c.Func.Cvars.Slice() {
+ if v.Op == OXXX {
+ continue
+ }
+
+ o := v.Name.Param.Outer
+ // make sure the outer param matches the inlining location
+ // NB: if we enabled inlining of functions containing OCLOSURE or refined
+ // the reassigned check via some sort of copy propagation this would most
+ // likely need to be changed to a loop to walk up to the correct Param
+ if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.Closure != Curfn) {
+ Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v)
+ }
+
+ if v.Name.Byval() {
+ iv := typecheck(inlvar(v), Erv)
+ ninit.Append(nod(ODCL, iv, nil))
+ ninit.Append(typecheck(nod(OAS, iv, o), Etop))
+ inlvars[v] = iv
+ } else {
+ addr := newname(lookup("&" + v.Sym.Name))
+ addr.Type = types.NewPtr(v.Type)
+ ia := typecheck(inlvar(addr), Erv)
+ ninit.Append(nod(ODCL, ia, nil))
+ ninit.Append(typecheck(nod(OAS, ia, nod(OADDR, o, nil)), Etop))
+ inlvars[addr] = ia
+
+ // When capturing by reference, all occurrence of the captured var
+ // must be substituted with dereference of the temporary address
+ inlvars[v] = typecheck(nod(OIND, ia, nil), Erv)
+ }
+ }
+ }
} else {
dcl = fn.Func.Dcl // imported function
}
- // Make temp names to use instead of the originals.
- inlvars := make(map[*Node]*Node)
for _, ln := range dcl {
if ln.Op != ONAME {
continue
@@ -631,13 +873,25 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
if ln.Class() == PPARAM || ln.Name.Param.Stackcopy != nil && ln.Name.Param.Stackcopy.Class() == PPARAM {
ninit.Append(nod(ODCL, inlvars[ln], nil))
}
+ if genDwarfInline > 0 {
+ inlf := inlvars[ln]
+ if ln.Class() == PPARAM {
+ inlf.SetInlFormal(true)
+ } else {
+ inlf.SetInlLocal(true)
+ }
+ inlf.Pos = ln.Pos
+ inlfvars = append(inlfvars, inlf)
+ }
}
// temporaries for return values.
var retvars []*Node
for i, t := range fn.Type.Results().Fields().Slice() {
var m *Node
+ var mpos src.XPos
if t != nil && asNode(t.Nname) != nil && !isblank(asNode(t.Nname)) {
+ mpos = asNode(t.Nname).Pos
m = inlvar(asNode(t.Nname))
m = typecheck(m, Erv)
inlvars[asNode(t.Nname)] = m
@@ -646,6 +900,17 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
m = retvar(t, i)
}
+ if genDwarfInline > 0 {
+ // Don't update the src.Pos on a return variable if it
+ // was manufactured by the inliner (e.g. "~r2"); such vars
+ // were not part of the original callee.
+ if !strings.HasPrefix(m.Sym.Name, "~r") {
+ m.SetInlFormal(true)
+ m.Pos = mpos
+ inlfvars = append(inlfvars, m)
+ }
+ }
+
ninit.Append(nod(ODCL, m, nil))
retvars = append(retvars, m)
}
@@ -736,10 +1001,26 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
inlgen++
+ parent := -1
+ if b := Ctxt.PosTable.Pos(n.Pos).Base(); b != nil {
+ parent = b.InliningIndex()
+ }
+ sort.Sort(byNodeName(dcl))
+ newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym())
+
+ if genDwarfInline > 0 {
+ if !fn.Sym.Linksym().WasInlined() {
+ Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn)
+ fn.Sym.Linksym().Set(obj.AttrWasInlined, true)
+ }
+ }
+
subst := inlsubst{
- retlabel: retlabel,
- retvars: retvars,
- inlvars: inlvars,
+ retlabel: retlabel,
+ retvars: retvars,
+ inlvars: inlvars,
+ bases: make(map[*src.PosBase]*src.PosBase),
+ newInlIndex: newIndex,
}
body := subst.list(fn.Func.Inl)
@@ -749,6 +1030,12 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
typecheckslice(body, Etop)
+ if genDwarfInline > 0 {
+ for _, v := range inlfvars {
+ v.Pos = subst.updatedPos(v.Pos)
+ }
+ }
+
//dumplist("ninit post", ninit);
call := nod(OINLCALL, nil, nil)
@@ -758,51 +1045,24 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
call.Type = n.Type
call.SetTypecheck(1)
- // Hide the args from setPos -- the parameters to the inlined
- // call already have good line numbers that should be preserved.
- args := as.Rlist
- as.Rlist.Set(nil)
-
- // Rewrite the line information for the inlined AST.
- parent := -1
- callBase := Ctxt.PosTable.Pos(n.Pos).Base()
- if callBase != nil {
- parent = callBase.InliningIndex()
- }
- newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym())
- setpos := &setPos{
- bases: make(map[*src.PosBase]*src.PosBase),
- newInlIndex: newIndex,
- }
- setpos.node(call)
-
- as.Rlist.Set(args.Slice())
-
- //dumplist("call body", body);
-
- n = call
-
// transitive inlining
// might be nice to do this before exporting the body,
// but can't emit the body with inlining expanded.
// instead we emit the things that the body needs
// and each use must redo the inlining.
// luckily these are small.
- body = fn.Func.Inl.Slice()
- fn.Func.Inl.Set(nil) // prevent infinite recursion (shouldn't happen anyway)
inlnodelist(call.Nbody)
for _, n := range call.Nbody.Slice() {
if n.Op == OINLCALL {
inlconv2stmt(n)
}
}
- fn.Func.Inl.Set(body)
if Debug['m'] > 2 {
- fmt.Printf("%v: After inlining %+v\n\n", n.Line(), n)
+ fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
}
- return n
+ return call
}
// Every time we expand a function we generate a new set of tmpnames,
@@ -857,6 +1117,14 @@ type inlsubst struct {
retvars []*Node
inlvars map[*Node]*Node
+
+ // bases maps from original PosBase to PosBase with an extra
+ // inlined call frame.
+ bases map[*src.PosBase]*src.PosBase
+
+ // newInlIndex is the index of the inlined call frame to
+ // insert for inlined nodes.
+ newInlIndex int
}
// list inlines a list of nodes.
@@ -904,7 +1172,6 @@ func (subst *inlsubst) node(n *Node) *Node {
// dump("Return before substitution", n);
case ORETURN:
m := nod(OGOTO, subst.retlabel, nil)
-
m.Ninit.Set(subst.list(n.Ninit))
if len(subst.retvars) != 0 && n.List.Len() != 0 {
@@ -930,6 +1197,7 @@ func (subst *inlsubst) node(n *Node) *Node {
case OGOTO, OLABEL:
m := nod(OXXX, nil, nil)
*m = *n
+ m.Pos = subst.updatedPos(m.Pos)
m.Ninit.Set(nil)
p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
m.Left = newname(lookup(p))
@@ -939,6 +1207,7 @@ func (subst *inlsubst) node(n *Node) *Node {
m := nod(OXXX, nil, nil)
*m = *n
+ m.Pos = subst.updatedPos(m.Pos)
m.Ninit.Set(nil)
if n.Op == OCLOSURE {
@@ -955,55 +1224,39 @@ func (subst *inlsubst) node(n *Node) *Node {
return m
}
-// setPos is a visitor to update position info with a new inlining index.
-type setPos struct {
- bases map[*src.PosBase]*src.PosBase
- newInlIndex int
-}
-
-func (s *setPos) nodelist(ll Nodes) {
- for _, n := range ll.Slice() {
- s.node(n)
- }
-}
-
-func (s *setPos) node(n *Node) {
- if n == nil {
- return
- }
- if n.Op == OLITERAL || n.Op == OTYPE {
- if n.Sym != nil {
- // This node is not a copy, so don't clobber position.
- return
- }
- }
-
- // don't clobber names, unless they're freshly synthesized
- if n.Op != ONAME || !n.Pos.IsKnown() {
- n.Pos = s.updatedPos(n)
- }
-
- s.node(n.Left)
- s.node(n.Right)
- s.nodelist(n.List)
- s.nodelist(n.Rlist)
- s.nodelist(n.Ninit)
- s.nodelist(n.Nbody)
-}
-
-func (s *setPos) updatedPos(n *Node) src.XPos {
- pos := Ctxt.PosTable.Pos(n.Pos)
+func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
+ pos := Ctxt.PosTable.Pos(xpos)
oldbase := pos.Base() // can be nil
- newbase := s.bases[oldbase]
+ newbase := subst.bases[oldbase]
if newbase == nil {
- newbase = src.NewInliningBase(oldbase, s.newInlIndex)
- pos.SetBase(newbase)
- s.bases[oldbase] = newbase
+ newbase = src.NewInliningBase(oldbase, subst.newInlIndex)
+ subst.bases[oldbase] = newbase
}
pos.SetBase(newbase)
return Ctxt.PosTable.XPos(pos)
}
-func (n *Node) isMethodCalledAsFunction() bool {
- return n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME
+func cmpNodeName(a, b *Node) bool {
+ // named before artificial
+ aart := 0
+ if strings.HasPrefix(a.Sym.Name, "~r") {
+ aart = 1
+ }
+ bart := 0
+ if strings.HasPrefix(b.Sym.Name, "~r") {
+ bart = 1
+ }
+ if aart != bart {
+ return aart < bart
+ }
+
+ // otherwise sort by name
+ return a.Sym.Name < b.Sym.Name
}
+
+// byNodeName implements sort.Interface for []*Node using cmpNodeName.
+type byNodeName []*Node
+
+func (s byNodeName) Len() int { return len(s) }
+func (s byNodeName) Less(i, j int) bool { return cmpNodeName(s[i], s[j]) }
+func (s byNodeName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/gc/inl_test.go
new file mode 100644
index 00000000000..3e6da2ed7bb
--- /dev/null
+++ b/src/cmd/compile/internal/gc/inl_test.go
@@ -0,0 +1,215 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bufio"
+ "internal/testenv"
+ "io"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// TestIntendedInlining tests that specific runtime functions are inlined.
+// This allows refactoring for code clarity and re-use without fear that
+// changes to the compiler will cause silent performance regressions.
+func TestIntendedInlining(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ // want is the list of function names (by package) that should
+ // be inlined.
+ want := map[string][]string{
+ "runtime": {
+ // TODO(mvdan): enable these once mid-stack
+ // inlining is available
+ // "adjustctxt",
+
+ "add",
+ "acquirem",
+ "add1",
+ "addb",
+ "adjustpanics",
+ "adjustpointer",
+ "bucketMask",
+ "bucketShift",
+ "chanbuf",
+ "deferArgs",
+ "deferclass",
+ "evacuated",
+ "fastlog2",
+ "fastrand",
+ "float64bits",
+ "funcPC",
+ "getm",
+ "isDirectIface",
+ "itabHashFunc",
+ "maxSliceCap",
+ "noescape",
+ "readUnaligned32",
+ "readUnaligned64",
+ "releasem",
+ "round",
+ "roundupsize",
+ "selectsize",
+ "stringStructOf",
+ "subtract1",
+ "subtractb",
+ "tophash",
+ "totaldefersize",
+ "(*bmap).keys",
+ "(*bmap).overflow",
+ "(*waitq).enqueue",
+
+ // GC-related ones
+ "cgoInRange",
+ "gclinkptr.ptr",
+ "guintptr.ptr",
+ "heapBits.bits",
+ "heapBits.isPointer",
+ "heapBits.morePointers",
+ "heapBits.next",
+ "heapBitsForAddr",
+ "inheap",
+ "markBits.isMarked",
+ "muintptr.ptr",
+ "puintptr.ptr",
+ "spanOfUnchecked",
+ "(*gcWork).putFast",
+ "(*gcWork).tryGetFast",
+ "(*guintptr).set",
+ "(*markBits).advance",
+ "(*mspan).allocBitsForIndex",
+ "(*mspan).base",
+ "(*mspan).markBitsForBase",
+ "(*mspan).markBitsForIndex",
+ "(*muintptr).set",
+ "(*puintptr).set",
+ },
+ "runtime/internal/sys": {},
+ "bytes": {
+ "(*Buffer).Bytes",
+ "(*Buffer).Cap",
+ "(*Buffer).Len",
+ "(*Buffer).Next",
+ "(*Buffer).Read",
+ "(*Buffer).ReadByte",
+ "(*Buffer).Reset",
+ "(*Buffer).String",
+ "(*Buffer).UnreadByte",
+ "(*Buffer).tryGrowByReslice",
+ },
+ "unicode/utf8": {
+ "FullRune",
+ "FullRuneInString",
+ "RuneLen",
+ "ValidRune",
+ },
+ "reflect": {
+ "Value.CanAddr",
+ "Value.CanSet",
+ "Value.IsValid",
+ "add",
+ "align",
+ "flag.kind",
+ "flag.ro",
+
+ // TODO: these use panic, need mid-stack
+ // inlining
+ // "Value.CanInterface",
+ // "Value.pointer",
+ // "flag.mustBe",
+ // "flag.mustBeAssignable",
+ // "flag.mustBeExported",
+ },
+ "regexp": {
+ "(*bitState).push",
+ },
+ }
+
+ if runtime.GOARCH != "386" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" {
+ // nextFreeFast calls sys.Ctz64, which on 386 is implemented in asm and is not inlinable.
+ // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.
+ // On MIPS64x, Ctz64 is not intrinsified and causes nextFreeFast too expensive to inline
+ // (Issue 22239).
+ want["runtime"] = append(want["runtime"], "nextFreeFast")
+ }
+ if runtime.GOARCH != "386" {
+ // As explained above, Ctz64 and Ctz32 are not Go code on 386.
+ // The same applies to Bswap32.
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz64")
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz32")
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32")
+ }
+ switch runtime.GOARCH {
+ case "amd64", "amd64p32", "arm64", "mips64", "mips64le", "ppc64", "ppc64le", "s390x":
+ // rotl_31 is only defined on 64-bit architectures
+ want["runtime"] = append(want["runtime"], "rotl_31")
+ }
+
+ notInlinedReason := make(map[string]string)
+ pkgs := make([]string, 0, len(want))
+ for pname, fnames := range want {
+ pkgs = append(pkgs, pname)
+ for _, fname := range fnames {
+ fullName := pname + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ t.Errorf("duplicate func: %s", fullName)
+ }
+ notInlinedReason[fullName] = "unknown reason"
+ }
+ }
+
+ args := append([]string{"build", "-a", "-gcflags=all=-m -m"}, pkgs...)
+ cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), args...))
+ pr, pw := io.Pipe()
+ cmd.Stdout = pw
+ cmd.Stderr = pw
+ cmdErr := make(chan error, 1)
+ go func() {
+ cmdErr <- cmd.Run()
+ pw.Close()
+ }()
+ scanner := bufio.NewScanner(pr)
+ curPkg := ""
+ canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
+ cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "# ") {
+ curPkg = line[2:]
+ continue
+ }
+ if m := canInline.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ delete(notInlinedReason, curPkg+"."+fname)
+ continue
+ }
+ if m := cannotInline.FindStringSubmatch(line); m != nil {
+ fname, reason := m[1], m[2]
+ fullName := curPkg + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ // cmd/compile gave us a reason why
+ notInlinedReason[fullName] = reason
+ }
+ continue
+ }
+ }
+ if err := <-cmdErr; err != nil {
+ t.Fatal(err)
+ }
+ if err := scanner.Err(); err != nil {
+ t.Fatal(err)
+ }
+ for fullName, reason := range notInlinedReason {
+ t.Errorf("%s was not inlined: %s", fullName, reason)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 2b61564ad8d..b651c9acb3a 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -11,6 +11,7 @@ import (
"bytes"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
+ "cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
@@ -43,7 +44,12 @@ var (
Debug_slice int
Debug_vlog bool
Debug_wb int
+ Debug_eagerwb int
Debug_pctab string
+ Debug_locationlist int
+ Debug_typecheckinl int
+ Debug_gendwarfinl int
+ Debug_softfloat int
)
// Debug arguments.
@@ -67,8 +73,13 @@ var debugtab = []struct {
{"slice", "print information about slice compilation", &Debug_slice},
{"typeassert", "print information about type assertion inlining", &Debug_typeassert},
{"wb", "print information about write barriers", &Debug_wb},
+ {"eagerwb", "use unbuffered write barrier", &Debug_eagerwb},
{"export", "print export data", &Debug_export},
{"pctab", "print named pc-value table", &Debug_pctab},
+ {"locationlists", "print information about DWARF location list creation", &Debug_locationlist},
+ {"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl},
+ {"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl},
+ {"softfloat", "force compiler to emit soft-float code", &Debug_softfloat},
}
const debugHelpHeader = `usage: -d arg[,arg]* and arg is [=]
@@ -102,19 +113,6 @@ func hidePanic() {
}
}
-func doversion() {
- p := objabi.Expstring()
- if p == objabi.DefaultExpstring() {
- p = ""
- }
- sep := ""
- if p != "" {
- sep = " "
- }
- fmt.Printf("compile version %s%s%s\n", objabi.Version, sep, p)
- os.Exit(0)
-}
-
// supportsDynlink reports whether or not the code generator for the given
// architecture supports the -shared and -dynlink flags.
func supportsDynlink(arch *sys.Arch) bool {
@@ -125,6 +123,8 @@ func supportsDynlink(arch *sys.Arch) bool {
var timings Timings
var benchfile string
+var nowritebarrierrecCheck *nowritebarrierrecChecker
+
// Main parses flags and Go source files specified in the command-line
// arguments, type-checks the parsed Go package, compiles functions to machine
// code, and finally writes the compiled package definition to disk.
@@ -137,6 +137,7 @@ func Main(archInit func(*Arch)) {
Ctxt = obj.Linknew(thearch.LinkArch)
Ctxt.DiagFunc = yyerror
+ Ctxt.DiagFlush = flusherrors
Ctxt.Bso = bufio.NewWriter(os.Stdout)
localpkg = types.NewPkg("", "")
@@ -182,9 +183,10 @@ func Main(archInit func(*Arch)) {
objabi.Flagcount("E", "debug symbol export", &Debug['E'])
objabi.Flagfn1("I", "add `directory` to import search path", addidir)
objabi.Flagcount("K", "debug missing line numbers", &Debug['K'])
+ objabi.Flagcount("L", "show full file names in error messages", &Debug['L'])
objabi.Flagcount("N", "disable optimizations", &Debug['N'])
flag.BoolVar(&Debug_asm, "S", false, "print assembly listing")
- objabi.Flagfn0("V", "print compiler version", doversion)
+ objabi.AddVersionFlag() // -V
objabi.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
@@ -192,6 +194,8 @@ func Main(archInit func(*Arch)) {
flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)")
flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`; try -d help")
flag.BoolVar(&flagDWARF, "dwarf", true, "generate DWARF symbols")
+ flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", false, "add location lists to DWARF in optimized mode")
+ flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
objabi.Flagcount("f", "debug stack frames", &Debug['f'])
objabi.Flagcount("h", "halt on error", &Debug['h'])
@@ -235,6 +239,11 @@ func Main(archInit func(*Arch)) {
flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
objabi.Flagparse(usage)
+ // Record flags that affect the build result. (And don't
+ // record flags that don't, since that would cause spurious
+ // changes in the binary.)
+ recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists")
+
Ctxt.Flag_shared = flag_dynlink || flag_shared
Ctxt.Flag_dynlink = flag_dynlink
Ctxt.Flag_optimize = Debug['N'] == 0
@@ -243,6 +252,11 @@ func Main(archInit func(*Arch)) {
Ctxt.Debugvlog = Debug_vlog
if flagDWARF {
Ctxt.DebugInfo = debuginfo
+ Ctxt.GenAbstractFunc = genAbstractFunc
+ Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt)
+ } else {
+ // turn off inline generation if no dwarf at all
+ genDwarfInline = 0
}
if flag.NArg() < 1 && debugstr != "help" && debugstr != "ssa/help" {
@@ -298,6 +312,9 @@ func Main(archInit func(*Arch)) {
if nBackendWorkers > 1 && !concurrentBackendAllowed() {
log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
}
+ if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 {
+ log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name)
+ }
// parse -d argument
if debugstr != "" {
@@ -374,16 +391,29 @@ func Main(archInit func(*Arch)) {
// set via a -d flag
Ctxt.Debugpcln = Debug_pctab
+ if flagDWARF {
+ dwarf.EnableLogging(Debug_gendwarfinl != 0)
+ }
+
+ if Debug_softfloat != 0 {
+ thearch.SoftFloat = true
+ }
// enable inlining. for now:
// default: inlining on. (debug['l'] == 1)
// -l: inlining off (debug['l'] == 0)
- // -ll, -lll: inlining on again, with extra debugging (debug['l'] > 1)
+ // -l=2, -l=3: inlining on again, with extra debugging (debug['l'] > 1)
if Debug['l'] <= 1 {
Debug['l'] = 1 - Debug['l']
}
- trackScopes = flagDWARF && Debug['l'] == 0 && Debug['N'] != 0
+ // The buffered write barrier is only implemented on amd64
+ // right now.
+ if objabi.GOARCH != "amd64" {
+ Debug_eagerwb = 1
+ }
+
+ trackScopes = flagDWARF && ((Debug['l'] == 0 && Debug['N'] != 0) || Ctxt.Flag_locationlists)
Widthptr = thearch.LinkArch.PtrSize
Widthreg = thearch.LinkArch.RegSize
@@ -488,6 +518,8 @@ func Main(archInit func(*Arch)) {
fcount++
}
}
+ // With all types checked, it's now safe to verify map keys.
+ checkMapKeys()
timings.AddEvent(fcount, "funcs")
// Phase 4: Decide how to capture closed variables.
@@ -510,7 +542,7 @@ func Main(archInit func(*Arch)) {
// Phase 5: Inlining
timings.Start("fe", "inlining")
- if Debug['l'] > 1 {
+ if Debug_typecheckinl != 0 {
// Typecheck imported function bodies if debug['l'] > 1,
// otherwise lazily when used or re-exported.
for _, n := range importlist {
@@ -553,6 +585,14 @@ func Main(archInit func(*Arch)) {
escapes(xtop)
if dolinkobj {
+ // Collect information for go:nowritebarrierrec
+ // checking. This must happen before transformclosure.
+ // We'll do the final check after write barriers are
+ // inserted.
+ if compiling_runtime {
+ nowritebarrierrecCheck = newNowritebarrierrecChecker()
+ }
+
// Phase 7: Transform closure bodies to properly reference captured variables.
// This needs to happen before walk, because closures must be transformed
// before walk reaches a call of a closure.
@@ -601,8 +641,20 @@ func Main(archInit func(*Arch)) {
// at least until this convoluted structure has been unwound.
nBackendWorkers = 1
- if compiling_runtime {
- checknowritebarrierrec()
+ if nowritebarrierrecCheck != nil {
+ // Write barriers are now known. Check the
+ // call graph.
+ nowritebarrierrecCheck.check()
+ nowritebarrierrecCheck = nil
+ }
+
+ // Finalize DWARF inline routine DIEs, then explicitly turn off
+ // DWARF inlining gen so as to avoid problems with generated
+ // method wrappers.
+ if Ctxt.DwFixups != nil {
+ Ctxt.DwFixups.Finalize(myimportpath, Debug_gendwarfinl != 0)
+ Ctxt.DwFixups = nil
+ genDwarfInline = 0
}
// Check whether any of the functions we have compiled have gigantic stack frames.
@@ -610,7 +662,7 @@ func Main(archInit func(*Arch)) {
return largeStackFrames[i].Before(largeStackFrames[j])
})
for _, largePos := range largeStackFrames {
- yyerrorl(largePos, "stack frame too large (>2GB)")
+ yyerrorl(largePos, "stack frame too large (>1GB)")
}
}
@@ -763,7 +815,7 @@ func isDriveLetter(b byte) bool {
return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
}
-// is this path a local name? begins with ./ or ../ or /
+// is this path a local name? begins with ./ or ../ or /
func islocalname(name string) bool {
return strings.HasPrefix(name, "/") ||
runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
@@ -868,7 +920,7 @@ func loadsys() {
n.Type = typ
declare(n, PFUNC)
case varTag:
- importvar(Runtimepkg, sym, typ)
+ importvar(lineno, Runtimepkg, sym, typ)
default:
Fatalf("unhandled declaration tag %v", d.tag)
}
@@ -1178,8 +1230,8 @@ func concurrentBackendAllowed() bool {
if Debug_vlog || debugstr != "" || debuglive > 0 {
return false
}
- // TODO: test and add builders for GOEXPERIMENT values, and enable
- if os.Getenv("GOEXPERIMENT") != "" {
+ // TODO: Test and delete these conditions.
+ if objabi.Fieldtrack_enabled != 0 || objabi.Preemptibleloops_enabled != 0 || objabi.Clobberdead_enabled != 0 {
return false
}
// TODO: fix races and enable the following flags
@@ -1188,3 +1240,58 @@ func concurrentBackendAllowed() bool {
}
return true
}
+
+// recordFlags records the specified command-line flags to be placed
+// in the DWARF info.
+func recordFlags(flags ...string) {
+ if myimportpath == "" {
+ // We can't record the flags if we don't know what the
+ // package name is.
+ return
+ }
+
+ type BoolFlag interface {
+ IsBoolFlag() bool
+ }
+ type CountFlag interface {
+ IsCountFlag() bool
+ }
+ var cmd bytes.Buffer
+ for _, name := range flags {
+ f := flag.Lookup(name)
+ if f == nil {
+ continue
+ }
+ getter := f.Value.(flag.Getter)
+ if getter.String() == f.DefValue {
+ // Flag has default value, so omit it.
+ continue
+ }
+ if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
+ val, ok := getter.Get().(bool)
+ if ok && val {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
+ val, ok := getter.Get().(int)
+ if ok && val == 1 {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
+ }
+
+ if cmd.Len() == 0 {
+ return
+ }
+ s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + myimportpath)
+ s.Type = objabi.SDWARFINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ Ctxt.Data = append(Ctxt.Data, s)
+ s.P = cmd.Bytes()[1:]
+}
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
index 54c48434cc7..dcd5f20dfd2 100644
--- a/src/cmd/compile/internal/gc/noder.go
+++ b/src/cmd/compile/internal/gc/noder.go
@@ -19,7 +19,6 @@ import (
)
func parseFiles(filenames []string) uint {
- var lines uint
var noders []*noder
// Limit the number of simultaneously open files.
sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
@@ -45,6 +44,7 @@ func parseFiles(filenames []string) uint {
}(filename)
}
+ var lines uint
for _, p := range noders {
for e := range p.err {
yyerrorpos(e.Pos, "%s", e.Msg)
@@ -87,15 +87,15 @@ type noder struct {
scope ScopeID
}
-func (p *noder) funchdr(n *Node, pos src.Pos) ScopeID {
+func (p *noder) funchdr(n *Node) ScopeID {
old := p.scope
p.scope = 0
funchdr(n)
return old
}
-func (p *noder) funcbody(n *Node, pos src.Pos, old ScopeID) {
- funcbody(n)
+func (p *noder) funcbody(old ScopeID) {
+ funcbody()
p.scope = old
}
@@ -224,15 +224,14 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
pack.Sym = my
pack.Name.Pkg = ipkg
- if my.Name == "." {
+ switch my.Name {
+ case ".":
importdot(ipkg, pack)
return
- }
- if my.Name == "init" {
+ case "init":
yyerrorl(pack.Pos, "cannot import package as init - init must be a func")
return
- }
- if my.Name == "_" {
+ case "_":
return
}
if my.Def != nil {
@@ -322,7 +321,6 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
n := p.declName(decl.Name)
n.Op = OTYPE
declare(n, dclcontext)
- n.SetLocal(true)
// decl.Type may be nil but in that case we got a syntax error during parsing
typ := p.typeExprOrNil(decl.Type)
@@ -391,9 +389,8 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
declare(f.Func.Nname, PFUNC)
}
- oldScope := p.funchdr(f, fun.Pos())
+ oldScope := p.funchdr(f)
- endPos := fun.Pos()
if fun.Body != nil {
if f.Noescape() {
yyerrorl(f.Pos, "can only use //go:noescape with external func implementations")
@@ -405,7 +402,6 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
}
f.Nbody.Set(body)
- endPos = fun.Body.Rbrace
lineno = Ctxt.PosTable.XPos(fun.Body.Rbrace)
f.Func.Endlineno = lineno
} else {
@@ -414,7 +410,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
}
}
- p.funcbody(f, endPos, oldScope)
+ p.funcbody(oldScope)
return f
}
@@ -541,6 +537,9 @@ func (p *noder) expr(expr syntax.Expr) *Node {
// ntype? Shrug, doesn't matter here.
return p.nod(expr, ODOTTYPE, p.expr(expr.X), p.expr(expr.Type))
case *syntax.Operation:
+ if expr.Op == syntax.Add && expr.Y != nil {
+ return p.sum(expr)
+ }
x := p.expr(expr.X)
if expr.Y == nil {
if expr.Op == syntax.And {
@@ -601,6 +600,82 @@ func (p *noder) expr(expr syntax.Expr) *Node {
panic("unhandled Expr")
}
+// sum efficiently handles very large summation expressions (such as
+// in issue #16394). In particular, it avoids left recursion and
+// collapses string literals.
+func (p *noder) sum(x syntax.Expr) *Node {
+ // While we need to handle long sums with asymptotic
+ // efficiency, the vast majority of sums are very small: ~95%
+ // have only 2 or 3 operands, and ~99% of string literals are
+ // never concatenated.
+
+ adds := make([]*syntax.Operation, 0, 2)
+ for {
+ add, ok := x.(*syntax.Operation)
+ if !ok || add.Op != syntax.Add || add.Y == nil {
+ break
+ }
+ adds = append(adds, add)
+ x = add.X
+ }
+
+ // nstr is the current rightmost string literal in the
+ // summation (if any), and chunks holds its accumulated
+ // substrings.
+ //
+ // Consider the expression x + "a" + "b" + "c" + y. When we
+ // reach the string literal "a", we assign nstr to point to
+ // its corresponding Node and initialize chunks to {"a"}.
+ // Visiting the subsequent string literals "b" and "c", we
+ // simply append their values to chunks. Finally, when we
+ // reach the non-constant operand y, we'll join chunks to form
+ // "abc" and reassign the "a" string literal's value.
+ //
+ // N.B., we need to be careful about named string constants
+ // (indicated by Sym != nil) because 1) we can't modify their
+ // value, as doing so would affect other uses of the string
+ // constant, and 2) they may have types, which we need to
+ // handle correctly. For now, we avoid these problems by
+ // treating named string constants the same as non-constant
+ // operands.
+ var nstr *Node
+ chunks := make([]string, 0, 1)
+
+ n := p.expr(x)
+ if Isconst(n, CTSTR) && n.Sym == nil {
+ nstr = n
+ chunks = append(chunks, nstr.Val().U.(string))
+ }
+
+ for i := len(adds) - 1; i >= 0; i-- {
+ add := adds[i]
+
+ r := p.expr(add.Y)
+ if Isconst(r, CTSTR) && r.Sym == nil {
+ if nstr != nil {
+ // Collapse r into nstr instead of adding to n.
+ chunks = append(chunks, r.Val().U.(string))
+ continue
+ }
+
+ nstr = r
+ chunks = append(chunks, nstr.Val().U.(string))
+ } else {
+ if len(chunks) > 1 {
+ nstr.SetVal(Val{U: strings.Join(chunks, "")})
+ }
+ nstr = nil
+ chunks = chunks[:0]
+ }
+ n = p.nod(add, OADD, n, r)
+ }
+ if len(chunks) > 1 {
+ nstr.SetVal(Val{U: strings.Join(chunks, "")})
+ }
+
+ return n
+}
+
func (p *noder) typeExpr(typ syntax.Expr) *Node {
// TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
return p.expr(typ)
@@ -700,7 +775,11 @@ func (p *noder) embedded(typ syntax.Expr) *Node {
}
typ = op.X
}
- n := embedded(p.packname(typ), localpkg)
+
+ sym := p.packname(typ)
+ n := nod(ODCLFIELD, newname(lookup(sym.Name)), oldname(sym))
+ n.SetEmbedded(true)
+
if isStar {
n.Right = p.nod(op, OIND, n.Right, nil)
}
@@ -708,9 +787,13 @@ func (p *noder) embedded(typ syntax.Expr) *Node {
}
func (p *noder) stmts(stmts []syntax.Stmt) []*Node {
+ return p.stmtsFall(stmts, false)
+}
+
+func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node {
var nodes []*Node
- for _, stmt := range stmts {
- s := p.stmt(stmt)
+ for i, stmt := range stmts {
+ s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
} else if s.Op == OBLOCK && s.Ninit.Len() == 0 {
nodes = append(nodes, s.List.Slice()...)
@@ -722,12 +805,16 @@ func (p *noder) stmts(stmts []syntax.Stmt) []*Node {
}
func (p *noder) stmt(stmt syntax.Stmt) *Node {
+ return p.stmtFall(stmt, false)
+}
+
+func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node {
p.lineno(stmt)
switch stmt := stmt.(type) {
case *syntax.EmptyStmt:
return nil
case *syntax.LabeledStmt:
- return p.labeledStmt(stmt)
+ return p.labeledStmt(stmt, fallOK)
case *syntax.BlockStmt:
l := p.blockStmt(stmt)
if len(l) == 0 {
@@ -749,15 +836,10 @@ func (p *noder) stmt(stmt syntax.Stmt) *Node {
return n
}
- lhs := p.exprList(stmt.Lhs)
- rhs := p.exprList(stmt.Rhs)
-
n := p.nod(stmt, OAS, nil, nil) // assume common case
- if stmt.Op == syntax.Def {
- n.SetColas(true)
- colasdefn(lhs, n) // modifies lhs, call before using lhs[0] in common case
- }
+ rhs := p.exprList(stmt.Rhs)
+ lhs := p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)
if len(lhs) == 1 && len(rhs) == 1 {
// common case
@@ -778,7 +860,10 @@ func (p *noder) stmt(stmt syntax.Stmt) *Node {
case syntax.Continue:
op = OCONTINUE
case syntax.Fallthrough:
- op = OXFALL
+ if !fallOK {
+ yyerror("fallthrough statement out of place")
+ }
+ op = OFALL
case syntax.Goto:
op = OGOTO
default:
@@ -788,9 +873,6 @@ func (p *noder) stmt(stmt syntax.Stmt) *Node {
if stmt.Label != nil {
n.Left = p.newname(stmt.Label)
}
- if op == OXFALL {
- n.Xoffset = int64(types.Block)
- }
return n
case *syntax.CallStmt:
var op Op
@@ -836,6 +918,66 @@ func (p *noder) stmt(stmt syntax.Stmt) *Node {
panic("unhandled Stmt")
}
+func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node {
+ if !colas {
+ return p.exprList(expr)
+ }
+
+ defn.SetColas(true)
+
+ var exprs []syntax.Expr
+ if list, ok := expr.(*syntax.ListExpr); ok {
+ exprs = list.ElemList
+ } else {
+ exprs = []syntax.Expr{expr}
+ }
+
+ res := make([]*Node, len(exprs))
+ seen := make(map[*types.Sym]bool, len(exprs))
+
+ newOrErr := false
+ for i, expr := range exprs {
+ p.lineno(expr)
+ res[i] = nblank
+
+ name, ok := expr.(*syntax.Name)
+ if !ok {
+ yyerrorpos(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
+ newOrErr = true
+ continue
+ }
+
+ sym := p.name(name)
+ if sym.IsBlank() {
+ continue
+ }
+
+ if seen[sym] {
+ yyerrorpos(expr.Pos(), "%v repeated on left side of :=", sym)
+ newOrErr = true
+ continue
+ }
+ seen[sym] = true
+
+ if sym.Block == types.Block {
+ res[i] = oldname(sym)
+ continue
+ }
+
+ newOrErr = true
+ n := newname(sym)
+ declare(n, dclcontext)
+ n.Name.Defn = defn
+ defn.Ninit.Append(nod(ODCL, n, nil))
+ res[i] = n
+ }
+
+ if !newOrErr {
+ yyerrorl(defn.Pos, "no new variables on left side of :=")
+ }
+ return res
+}
+
func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*Node {
p.openScope(stmt.Pos())
nodes := p.stmts(stmt.List)
@@ -875,12 +1017,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) *Node {
n = p.nod(r, ORANGE, nil, p.expr(r.X))
if r.Lhs != nil {
- lhs := p.exprList(r.Lhs)
- n.List.Set(lhs)
- if r.Def {
- n.SetColas(true)
- colasdefn(lhs, n)
- }
+ n.List.Set(p.assignList(r.Lhs, n, r.Def))
}
} else {
n = p.nod(stmt, OFOR, nil, nil)
@@ -910,7 +1047,7 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node {
}
tswitch := n.Left
- if tswitch != nil && (tswitch.Op != OTYPESW || tswitch.Left == nil) {
+ if tswitch != nil && tswitch.Op != OTYPESW {
tswitch = nil
}
n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
@@ -932,15 +1069,35 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace
if clause.Cases != nil {
n.List.Set(p.exprList(clause.Cases))
}
- if tswitch != nil {
+ if tswitch != nil && tswitch.Left != nil {
nn := newname(tswitch.Left.Sym)
declare(nn, dclcontext)
n.Rlist.Set1(nn)
// keep track of the instances for reporting unused
nn.Name.Defn = tswitch
}
- n.Xoffset = int64(types.Block)
- n.Nbody.Set(p.stmts(clause.Body))
+
+ // Trim trailing empty statements. We omit them from
+ // the Node AST anyway, and it's easier to identify
+ // out-of-place fallthrough statements without them.
+ body := clause.Body
+ for len(body) > 0 {
+ if _, ok := body[len(body)-1].(*syntax.EmptyStmt); !ok {
+ break
+ }
+ body = body[:len(body)-1]
+ }
+
+ n.Nbody.Set(p.stmtsFall(body, true))
+ if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == OFALL {
+ if tswitch != nil {
+ yyerror("cannot fallthrough in type switch")
+ }
+ if i+1 == len(clauses) {
+ yyerror("cannot fallthrough final case in switch")
+ }
+ }
+
nodes = append(nodes, n)
}
if len(clauses) > 0 {
@@ -968,7 +1125,6 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace src.Pos) []*Nod
if clause.Comm != nil {
n.List.Set1(p.stmt(clause.Comm))
}
- n.Xoffset = int64(types.Block)
n.Nbody.Set(p.stmts(clause.Body))
nodes = append(nodes, n)
}
@@ -978,12 +1134,12 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace src.Pos) []*Nod
return nodes
}
-func (p *noder) labeledStmt(label *syntax.LabeledStmt) *Node {
+func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node {
lhs := p.nod(label, OLABEL, p.newname(label.Label), nil)
var ls *Node
if label.Stmt != nil { // TODO(mdempsky): Should always be present.
- ls = p.stmt(label.Stmt)
+ ls = p.stmtFall(label.Stmt, fallOK)
}
lhs.Name.Defn = ls
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 83e64e728e9..874c59cb448 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -9,6 +9,7 @@ import (
"cmd/internal/bio"
"cmd/internal/obj"
"cmd/internal/objabi"
+ "cmd/internal/src"
"crypto/sha256"
"fmt"
"io"
@@ -16,9 +17,7 @@ import (
)
// architecture-independent object file output
-const (
- ArhdrSize = 60
-)
+const ArhdrSize = 60
func formathdr(arhdr []byte, name string, size int64) {
copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
@@ -56,13 +55,13 @@ func dumpobj() {
}
func dumpobj1(outfile string, mode int) {
- var err error
- bout, err = bio.Create(outfile)
+ bout, err := bio.Create(outfile)
if err != nil {
flusherrors()
fmt.Printf("can't create %s: %v\n", outfile, err)
errorexit()
}
+ defer bout.Close()
startobj := int64(0)
var arhdr [ArhdrSize]byte
@@ -92,7 +91,7 @@ func dumpobj1(outfile string, mode int) {
printheader()
if mode&modeCompilerObj != 0 {
- dumpexport()
+ dumpexport(bout)
}
if writearchive {
@@ -109,7 +108,6 @@ func dumpobj1(outfile string, mode int) {
}
if mode&modeLinkerObj == 0 {
- bout.Close()
return
}
@@ -171,8 +169,6 @@ func dumpobj1(outfile string, mode int) {
formathdr(arhdr[:], "_go_.o", size)
bout.Write(arhdr[:])
}
-
- bout.Close()
}
func addptabs() {
@@ -204,24 +200,68 @@ func addptabs() {
}
}
+func dumpGlobal(n *Node) {
+ if n.Type == nil {
+ Fatalf("external %v nil type\n", n)
+ }
+ if n.Class() == PFUNC {
+ return
+ }
+ if n.Sym.Pkg != localpkg {
+ return
+ }
+ dowidth(n.Type)
+ ggloblnod(n)
+}
+
+func dumpGlobalConst(n *Node) {
+ // only export typed constants
+ t := n.Type
+ if t == nil {
+ return
+ }
+ if n.Sym.Pkg != localpkg {
+ return
+ }
+ // only export integer constants for now
+ switch t.Etype {
+ case TINT8:
+ case TINT16:
+ case TINT32:
+ case TINT64:
+ case TINT:
+ case TUINT8:
+ case TUINT16:
+ case TUINT32:
+ case TUINT64:
+ case TUINT:
+ case TUINTPTR:
+ // ok
+ case TIDEAL:
+ if !Isconst(n, CTINT) {
+ return
+ }
+ x := n.Val().U.(*Mpint)
+ if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 {
+ return
+ }
+ // Ideal integers we export as int (if they fit).
+ t = types.Types[TINT]
+ default:
+ return
+ }
+ Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64())
+}
+
func dumpglobls() {
// add globals
for _, n := range externdcl {
- if n.Op != ONAME {
- continue
+ switch n.Op {
+ case ONAME:
+ dumpGlobal(n)
+ case OLITERAL:
+ dumpGlobalConst(n)
}
-
- if n.Type == nil {
- Fatalf("external %v nil type\n", n)
- }
- if n.Class() == PFUNC {
- continue
- }
- if n.Sym.Pkg != localpkg {
- continue
- }
- dowidth(n.Type)
- ggloblnod(n)
}
obj.SortSlice(funcsyms, func(i, j int) bool {
@@ -291,7 +331,7 @@ func dbvec(s *obj.LSym, off int, bv bvec) int {
return off
}
-func stringsym(s string) (data *obj.LSym) {
+func stringsym(pos src.XPos, s string) (data *obj.LSym) {
var symname string
if len(s) > 100 {
// Huge strings are hashed to avoid long names in object files.
@@ -312,7 +352,7 @@ func stringsym(s string) (data *obj.LSym) {
if !symdata.SeenGlobl() {
// string data
- off := dsname(symdata, 0, s)
+ off := dsname(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
}
@@ -328,7 +368,7 @@ func slicebytes(nam *Node, s string, len int) {
sym.Def = asTypesNode(newname(sym))
lsym := sym.Linksym()
- off := dsname(lsym, 0, s)
+ off := dsname(lsym, 0, s, nam.Pos, "slice")
ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
if nam.Op != ONAME {
@@ -341,7 +381,15 @@ func slicebytes(nam *Node, s string, len int) {
duintptr(nsym, off, uint64(len))
}
-func dsname(s *obj.LSym, off int, t string) int {
+func dsname(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
+ // Objects that are too large will cause the data section to overflow right away,
+ // causing a cryptic error message by the linker. Check for oversize objects here
+ // and provide a useful error message instead.
+ if int64(len(t)) > 2e9 {
+ yyerrorl(pos, "%v with length %v is too big", what, len(t))
+ return 0
+ }
+
s.WriteString(Ctxt, int64(off), len(t), t)
return off + len(t)
}
@@ -406,7 +454,7 @@ func gdata(nam *Node, nr *Node, wid int) {
}
case string:
- symdata := stringsym(u)
+ symdata := stringsym(nam.Pos, u)
s.WriteAddr(Ctxt, nam.Xoffset, Widthptr, symdata, 0)
s.WriteInt(Ctxt, nam.Xoffset+int64(Widthptr), Widthptr, int64(len(u)))
diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/gc/op_string.go
new file mode 100644
index 00000000000..2d4772780e5
--- /dev/null
+++ b/src/cmd/compile/internal/gc/op_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
+
+package gc
+
+import "fmt"
+
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDARRAYBYTESTRARRAYBYTESTRTMPARRAYRUNESTRSTRARRAYBYTESTRARRAYBYTETMPSTRARRAYRUNEASAS2AS2FUNCAS2RECVAS2MAPRAS2DOTTYPEASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECMPIFACECMPSTRCOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTINDINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMULDIVMODLSHRSHANDANDNOTNEWNOTCOMPLUSMINUSORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASEXCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELPROCRANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDDDDARGINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARKILLVARLIVEINDREGSPRETJMPGETGEND"
+
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 73, 88, 100, 112, 127, 139, 141, 144, 151, 158, 165, 175, 179, 183, 191, 199, 208, 216, 219, 224, 231, 239, 245, 252, 258, 267, 275, 283, 289, 293, 302, 309, 313, 316, 323, 331, 339, 346, 352, 355, 361, 368, 376, 380, 387, 395, 397, 399, 401, 403, 405, 407, 410, 415, 423, 426, 435, 438, 442, 450, 457, 466, 469, 472, 475, 478, 481, 484, 490, 493, 496, 499, 503, 508, 512, 517, 522, 528, 533, 537, 542, 550, 558, 564, 573, 580, 584, 591, 598, 606, 610, 614, 618, 625, 632, 640, 646, 651, 656, 660, 665, 673, 678, 683, 687, 690, 698, 702, 704, 709, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 778, 784, 791, 796, 800, 805, 809, 819, 824, 832, 839, 846, 854, 860, 864, 867}
+
+func (i Op) String() string {
+ if i >= Op(len(_Op_index)-1) {
+ return fmt.Sprintf("Op(%d)", i)
+ }
+ return _Op_name[_Op_index[i]:_Op_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/gc/opnames.go b/src/cmd/compile/internal/gc/opnames.go
deleted file mode 100644
index 09442b595f4..00000000000
--- a/src/cmd/compile/internal/gc/opnames.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// auto generated by go tool dist
-var opnames = []string{
- OXXX: "XXX",
- ONAME: "NAME",
- ONONAME: "NONAME",
- OTYPE: "TYPE",
- OPACK: "PACK",
- OLITERAL: "LITERAL",
- OADD: "ADD",
- OSUB: "SUB",
- OOR: "OR",
- OXOR: "XOR",
- OADDSTR: "ADDSTR",
- OADDR: "ADDR",
- OANDAND: "ANDAND",
- OAPPEND: "APPEND",
- OARRAYBYTESTR: "ARRAYBYTESTR",
- OARRAYBYTESTRTMP: "ARRAYBYTESTRTMP",
- OARRAYRUNESTR: "ARRAYRUNESTR",
- OSTRARRAYBYTE: "STRARRAYBYTE",
- OSTRARRAYBYTETMP: "STRARRAYBYTETMP",
- OSTRARRAYRUNE: "STRARRAYRUNE",
- OAS: "AS",
- OAS2: "AS2",
- OAS2FUNC: "AS2FUNC",
- OAS2RECV: "AS2RECV",
- OAS2MAPR: "AS2MAPR",
- OAS2DOTTYPE: "AS2DOTTYPE",
- OASOP: "ASOP",
- OCALL: "CALL",
- OCALLFUNC: "CALLFUNC",
- OCALLMETH: "CALLMETH",
- OCALLINTER: "CALLINTER",
- OCALLPART: "CALLPART",
- OCAP: "CAP",
- OCLOSE: "CLOSE",
- OCLOSURE: "CLOSURE",
- OCMPIFACE: "CMPIFACE",
- OCMPSTR: "CMPSTR",
- OCOMPLIT: "COMPLIT",
- OMAPLIT: "MAPLIT",
- OSTRUCTLIT: "STRUCTLIT",
- OARRAYLIT: "ARRAYLIT",
- OSLICELIT: "SLICELIT",
- OPTRLIT: "PTRLIT",
- OCONV: "CONV",
- OCONVIFACE: "CONVIFACE",
- OCONVNOP: "CONVNOP",
- OCOPY: "COPY",
- ODCL: "DCL",
- ODCLFUNC: "DCLFUNC",
- ODCLFIELD: "DCLFIELD",
- ODCLCONST: "DCLCONST",
- ODCLTYPE: "DCLTYPE",
- ODELETE: "DELETE",
- ODOT: "DOT",
- ODOTPTR: "DOTPTR",
- ODOTMETH: "DOTMETH",
- ODOTINTER: "DOTINTER",
- OXDOT: "XDOT",
- ODOTTYPE: "DOTTYPE",
- ODOTTYPE2: "DOTTYPE2",
- OEQ: "EQ",
- ONE: "NE",
- OLT: "LT",
- OLE: "LE",
- OGE: "GE",
- OGT: "GT",
- OIND: "IND",
- OINDEX: "INDEX",
- OINDEXMAP: "INDEXMAP",
- OKEY: "KEY",
- OSTRUCTKEY: "STRUCTKEY",
- OLEN: "LEN",
- OMAKE: "MAKE",
- OMAKECHAN: "MAKECHAN",
- OMAKEMAP: "MAKEMAP",
- OMAKESLICE: "MAKESLICE",
- OMUL: "MUL",
- ODIV: "DIV",
- OMOD: "MOD",
- OLSH: "LSH",
- ORSH: "RSH",
- OAND: "AND",
- OANDNOT: "ANDNOT",
- ONEW: "NEW",
- ONOT: "NOT",
- OCOM: "COM",
- OPLUS: "PLUS",
- OMINUS: "MINUS",
- OOROR: "OROR",
- OPANIC: "PANIC",
- OPRINT: "PRINT",
- OPRINTN: "PRINTN",
- OPAREN: "PAREN",
- OSEND: "SEND",
- OSLICE: "SLICE",
- OSLICEARR: "SLICEARR",
- OSLICESTR: "SLICESTR",
- OSLICE3: "SLICE3",
- OSLICE3ARR: "SLICE3ARR",
- ORECOVER: "RECOVER",
- ORECV: "RECV",
- ORUNESTR: "RUNESTR",
- OSELRECV: "SELRECV",
- OSELRECV2: "SELRECV2",
- OIOTA: "IOTA",
- OREAL: "REAL",
- OIMAG: "IMAG",
- OCOMPLEX: "COMPLEX",
- OBLOCK: "BLOCK",
- OBREAK: "BREAK",
- OCASE: "CASE",
- OXCASE: "XCASE",
- OCONTINUE: "CONTINUE",
- ODEFER: "DEFER",
- OEMPTY: "EMPTY",
- OFALL: "FALL",
- OXFALL: "XFALL",
- OFOR: "FOR",
- OFORUNTIL: "FORUNTIL",
- OGOTO: "GOTO",
- OIF: "IF",
- OLABEL: "LABEL",
- OPROC: "PROC",
- ORANGE: "RANGE",
- ORETURN: "RETURN",
- OSELECT: "SELECT",
- OSWITCH: "SWITCH",
- OTYPESW: "TYPESW",
- OTCHAN: "TCHAN",
- OTMAP: "TMAP",
- OTSTRUCT: "TSTRUCT",
- OTINTER: "TINTER",
- OTFUNC: "TFUNC",
- OTARRAY: "TARRAY",
- ODDD: "DDD",
- ODDDARG: "DDDARG",
- OINLCALL: "INLCALL",
- OEFACE: "EFACE",
- OITAB: "ITAB",
- OIDATA: "IDATA",
- OSPTR: "SPTR",
- OCLOSUREVAR: "CLOSUREVAR",
- OCFUNC: "CFUNC",
- OCHECKNIL: "CHECKNIL",
- OVARKILL: "VARKILL",
- OVARLIVE: "VARLIVE",
- OINDREGSP: "INDREGSP",
- ORETJMP: "RETJMP",
- OGETG: "GETG",
- OEND: "END",
-}
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index cdda2f3486e..de89adf0e0e 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -187,7 +187,7 @@ func isaddrokay(n *Node) bool {
// The result of orderaddrtemp MUST be assigned back to n, e.g.
// n.Left = orderaddrtemp(n.Left, order)
func orderaddrtemp(n *Node, order *Order) *Node {
- if consttype(n) >= 0 {
+ if consttype(n) > 0 {
// TODO: expand this to all static composite literal nodes?
n = defaultlit(n, nil)
dowidth(n.Type)
@@ -235,18 +235,16 @@ func poptemp(mark ordermarker, order *Order) {
// above the mark on the temporary stack, but it does not pop them
// from the stack.
func cleantempnopop(mark ordermarker, order *Order, out *[]*Node) {
- var kill *Node
-
for i := len(order.temp) - 1; i >= int(mark); i-- {
n := order.temp[i]
if n.Name.Keepalive() {
n.Name.SetKeepalive(false)
n.SetAddrtaken(true) // ensure SSA keeps the n variable
- kill = nod(OVARLIVE, n, nil)
+ kill := nod(OVARLIVE, n, nil)
kill = typecheck(kill, Etop)
*out = append(*out, kill)
}
- kill = nod(OVARKILL, n, nil)
+ kill := nod(OVARKILL, n, nil)
kill = typecheck(kill, Etop)
*out = append(*out, kill)
}
@@ -346,14 +344,14 @@ func ismulticall(l Nodes) bool {
}
// call must return multiple values
- return n.Left.Type.Results().NumFields() > 1
+ return n.Left.Type.NumResults() > 1
}
// Copyret emits t1, t2, ... = n, where n is a function call,
// and then returns the list t1, t2, ....
func copyret(n *Node, order *Order) []*Node {
if !n.Type.IsFuncArgStruct() {
- Fatalf("copyret %v %d", n.Type, n.Left.Type.Results().NumFields())
+ Fatalf("copyret %v %d", n.Type, n.Left.Type.NumResults())
}
var l1 []*Node
@@ -429,10 +427,10 @@ func ordercall(n *Node, order *Order) {
// to make sure that all map assignments have the form m[k] = x.
// (Note: orderexpr has already been called on n, so we know k is addressable.)
//
-// If n is the multiple assignment form ..., m[k], ... = ..., the rewrite is
+// If n is the multiple assignment form ..., m[k], ... = ..., x, ..., the rewrite is
// t1 = m
// t2 = k
-// ...., t3, ... = x
+// ...., t3, ... = ..., x, ...
// t1[t2] = t3
//
// The temporaries t1, t2 are needed in case the ... being assigned
@@ -446,30 +444,29 @@ func ordermapassign(n *Node, order *Order) {
Fatalf("ordermapassign %v", n.Op)
case OAS:
+ if n.Left.Op == OINDEXMAP {
+ // Make sure we evaluate the RHS before starting the map insert.
+ // We need to make sure the RHS won't panic. See issue 22881.
+ n.Right = ordercheapexpr(n.Right, order)
+ }
order.out = append(order.out, n)
case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC:
var post []*Node
- var m *Node
- var a *Node
- for i1, n1 := range n.List.Slice() {
- if n1.Op == OINDEXMAP {
- m = n1
+ for i, m := range n.List.Slice() {
+ switch {
+ case m.Op == OINDEXMAP:
if !m.Left.IsAutoTmp() {
m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
}
if !m.Right.IsAutoTmp() {
m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
}
- n.List.SetIndex(i1, ordertemp(m.Type, order, false))
- a = nod(OAS, m, n.List.Index(i1))
- a = typecheck(a, Etop)
- post = append(post, a)
- } else if instrumenting && n.Op == OAS2FUNC && !isblank(n.List.Index(i1)) {
- m = n.List.Index(i1)
+ fallthrough
+ case instrumenting && n.Op == OAS2FUNC && !isblank(m):
t := ordertemp(m.Type, order, false)
- n.List.SetIndex(i1, t)
- a = nod(OAS, m, t)
+ n.List.SetIndex(i, t)
+ a := nod(OAS, m, t)
a = typecheck(a, Etop)
post = append(post, a)
}
@@ -533,8 +530,9 @@ func orderstmt(n *Node, order *Order) {
// out map read from map write when l is
// a map index expression.
t := marktemp(order)
-
n.Left = orderexpr(n.Left, order, nil)
+ n.Right = orderexpr(n.Right, order, nil)
+
n.Left = ordersafeexpr(n.Left, order)
tmp1 := treecopy(n.Left, src.NoXPos)
if tmp1.Op == OINDEXMAP {
@@ -619,7 +617,6 @@ func orderstmt(n *Node, order *Order) {
ODCLCONST,
ODCLTYPE,
OFALL,
- OXFALL,
OGOTO,
OLABEL,
ORETJMP:
@@ -761,11 +758,12 @@ func orderstmt(n *Node, order *Order) {
r := n.Right
n.Right = ordercopyexpr(r, r.Type, order, 0)
- // n->alloc is the temp for the iterator.
- prealloc[n] = ordertemp(types.Types[TUINT8], order, true)
+ // prealloc[n] is the temp for the iterator.
+ // hiter contains pointers and needs to be zeroed.
+ prealloc[n] = ordertemp(hiter(n.Type), order, true)
}
- for i := range n.List.Slice() {
- n.List.SetIndex(i, orderexprinplace(n.List.Index(i), order))
+ for i, n1 := range n.List.Slice() {
+ n.List.SetIndex(i, orderexprinplace(n1, order))
}
orderblockNodes(&n.Nbody)
order.out = append(order.out, n)
@@ -787,14 +785,11 @@ func orderstmt(n *Node, order *Order) {
case OSELECT:
t := marktemp(order)
- var tmp1 *Node
- var tmp2 *Node
- var r *Node
for _, n2 := range n.List.Slice() {
if n2.Op != OXCASE {
Fatalf("order select case %v", n2.Op)
}
- r = n2.Left
+ r := n2.Left
setlineno(n2)
// Append any new body prologue to ninit.
@@ -855,16 +850,16 @@ func orderstmt(n *Node, order *Order) {
// use channel element type for temporary to avoid conversions,
// such as in case interfacevalue = <-intchan.
// the conversion happens in the OAS instead.
- tmp1 = r.Left
+ tmp1 := r.Left
if r.Colas() {
- tmp2 = nod(ODCL, tmp1, nil)
+ tmp2 := nod(ODCL, tmp1, nil)
tmp2 = typecheck(tmp2, Etop)
n2.Ninit.Append(tmp2)
}
r.Left = ordertemp(r.Right.Left.Type.Elem(), order, types.Haspointers(r.Right.Left.Type.Elem()))
- tmp2 = nod(OAS, tmp1, r.Left)
+ tmp2 := nod(OAS, tmp1, r.Left)
tmp2 = typecheck(tmp2, Etop)
n2.Ninit.Append(tmp2)
}
@@ -873,15 +868,15 @@ func orderstmt(n *Node, order *Order) {
r.List.Set(nil)
}
if r.List.Len() != 0 {
- tmp1 = r.List.First()
+ tmp1 := r.List.First()
if r.Colas() {
- tmp2 = nod(ODCL, tmp1, nil)
+ tmp2 := nod(ODCL, tmp1, nil)
tmp2 = typecheck(tmp2, Etop)
n2.Ninit.Append(tmp2)
}
r.List.Set1(ordertemp(types.Types[TBOOL], order, false))
- tmp2 = okas(tmp1, r.List.First())
+ tmp2 := okas(tmp1, r.List.First())
tmp2 = typecheck(tmp2, Etop)
n2.Ninit.Append(tmp2)
}
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 66e4a10ee88..cf99931bb5f 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -13,8 +13,10 @@ import (
"cmd/internal/src"
"cmd/internal/sys"
"fmt"
+ "math"
"math/rand"
"sort"
+ "strings"
"sync"
"time"
)
@@ -36,26 +38,22 @@ func emitptrargsmap() {
nptr := int(Curfn.Type.ArgWidth() / int64(Widthptr))
bv := bvalloc(int32(nptr) * 2)
nbitmap := 1
- if Curfn.Type.Results().NumFields() > 0 {
+ if Curfn.Type.NumResults() > 0 {
nbitmap = 2
}
off := duint32(lsym, 0, uint32(nbitmap))
off = duint32(lsym, off, uint32(bv.n))
- var xoffset int64
+
if Curfn.IsMethod() {
- xoffset = 0
- onebitwalktype1(Curfn.Type.Recvs(), &xoffset, bv)
+ onebitwalktype1(Curfn.Type.Recvs(), 0, bv)
}
-
- if Curfn.Type.Params().NumFields() > 0 {
- xoffset = 0
- onebitwalktype1(Curfn.Type.Params(), &xoffset, bv)
+ if Curfn.Type.NumParams() > 0 {
+ onebitwalktype1(Curfn.Type.Params(), 0, bv)
}
-
off = dbvec(lsym, off, bv)
- if Curfn.Type.Results().NumFields() > 0 {
- xoffset = 0
- onebitwalktype1(Curfn.Type.Results(), &xoffset, bv)
+
+ if Curfn.Type.NumResults() > 0 {
+ onebitwalktype1(Curfn.Type.Results(), 0, bv)
off = dbvec(lsym, off, bv)
}
@@ -132,20 +130,21 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
- switch a := v.Aux.(type) {
- case *ssa.ArgSymbol:
- n := a.Node.(*Node)
- // Don't modify nodfp; it is a global.
- if n != nodfp {
+ if n, ok := v.Aux.(*Node); ok {
+ switch n.Class() {
+ case PPARAM, PPARAMOUT:
+ // Don't modify nodfp; it is a global.
+ if n != nodfp {
+ n.Name.SetUsed(true)
+ }
+ case PAUTO:
n.Name.SetUsed(true)
}
- case *ssa.AutoSymbol:
- a.Node.(*Node).Name.SetUsed(true)
}
-
if !scratchUsed {
scratchUsed = v.Op.UsesScratch()
}
+
}
}
@@ -230,23 +229,23 @@ func compilenow() bool {
return nBackendWorkers == 1 && Debug_compilelater == 0
}
-const maxStackSize = 1 << 31
+const maxStackSize = 1 << 30
// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
- ssafn := buildssa(fn, worker)
- pp := newProgs(fn, worker)
- genssa(ssafn, pp)
- if pp.Text.To.Offset < maxStackSize {
- pp.Flush()
- } else {
+ f := buildssa(fn, worker)
+ if f.Frontend().(*ssafn).stksize >= maxStackSize {
largeStackFramesMu.Lock()
largeStackFrames = append(largeStackFrames, fn.Pos)
largeStackFramesMu.Unlock()
+ return
}
+ pp := newProgs(fn, worker)
+ genssa(f, pp)
+ pp.Flush()
// fieldtrack must be called after pp.Flush. See issue 20014.
fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
pp.Free()
@@ -281,6 +280,7 @@ func compileFunctions() {
})
}
var wg sync.WaitGroup
+ Ctxt.InParallel = true
c := make(chan *Node, nBackendWorkers)
for i := 0; i < nBackendWorkers; i++ {
wg.Add(1)
@@ -297,35 +297,101 @@ func compileFunctions() {
close(c)
compilequeue = nil
wg.Wait()
+ Ctxt.InParallel = false
sizeCalculationDisabled = false
}
}
-func debuginfo(fnsym *obj.LSym, curfn interface{}) []dwarf.Scope {
+func debuginfo(fnsym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*Node)
- if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
- Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+ debugInfo := fn.Func.DebugInfo
+ fn.Func.DebugInfo = nil
+ if fn.Func.Nname != nil {
+ if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
+ Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+ }
}
- var dwarfVars []*dwarf.Var
- var varScopes []ScopeID
-
+ var automDecls []*Node
+ // Populate Automs for fn.
for _, n := range fn.Func.Dcl {
if n.Op != ONAME { // might be OTYPE or OLITERAL
continue
}
-
var name obj.AddrName
+ switch n.Class() {
+ case PAUTO:
+ if !n.Name.Used() {
+ // Text == nil -> generating abstract function
+ if fnsym.Func.Text != nil {
+ Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+ }
+ continue
+ }
+ name = obj.NAME_AUTO
+ case PPARAM, PPARAMOUT:
+ name = obj.NAME_PARAM
+ default:
+ continue
+ }
+ automDecls = append(automDecls, n)
+ gotype := ngotype(n).Linksym()
+ fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
+ Asym: Ctxt.Lookup(n.Sym.Name),
+ Aoffset: int32(n.Xoffset),
+ Name: name,
+ Gotype: gotype,
+ })
+ }
+
+ decls, dwarfVars := createDwarfVars(fnsym, debugInfo, automDecls)
+
+ var varScopes []ScopeID
+ for _, decl := range decls {
+ pos := decl.Pos
+ if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
+ // It's not clear which position is correct for captured variables here:
+ // * decl.Pos is the wrong position for captured variables, in the inner
+ // function, but it is the right position in the outer function.
+ // * decl.Name.Defn is nil for captured variables that were arguments
+ // on the outer function, however the decl.Pos for those seems to be
+ // correct.
+ // * decl.Name.Defn is the "wrong" thing for variables declared in the
+ // header of a type switch, it's their position in the header, rather
+ // than the position of the case statement. In principle this is the
+ // right thing, but here we prefer the latter because it makes each
+ // instance of the header variable local to the lexical block of its
+ // case statement.
+ // This code is probably wrong for type switch variables that are also
+ // captured.
+ pos = decl.Name.Defn.Pos
+ }
+ varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
+ }
+
+ scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
+ var inlcalls dwarf.InlCalls
+ if genDwarfInline > 0 {
+ inlcalls = assembleInlines(fnsym, fn, dwarfVars)
+ }
+ return scopes, inlcalls
+}
+
+// createSimpleVars creates a DWARF entry for every variable declared in the
+// function, claiming that they are permanently on the stack.
+func createSimpleVars(automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
+ var vars []*dwarf.Var
+ var decls []*Node
+ selected := make(map[*Node]bool)
+ for _, n := range automDecls {
+ if n.IsAutoTmp() {
+ continue
+ }
var abbrev int
offs := n.Xoffset
switch n.Class() {
case PAUTO:
- if !n.Name.Used() {
- Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
- }
- name = obj.NAME_AUTO
-
abbrev = dwarf.DW_ABRV_AUTO
if Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
@@ -335,48 +401,490 @@ func debuginfo(fnsym *obj.LSym, curfn interface{}) []dwarf.Scope {
}
case PPARAM, PPARAMOUT:
- name = obj.NAME_PARAM
-
abbrev = dwarf.DW_ABRV_PARAM
offs += Ctxt.FixedFrameSize()
-
default:
- continue
+ Fatalf("createSimpleVars unexpected type %v for node %v", n.Class(), n)
}
+ selected[n] = true
+ typename := dwarf.InfoPrefix + typesymname(n.Type)
+ decls = append(decls, n)
+ inlIndex := 0
+ if genDwarfInline > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos) + 1
+ }
+ }
+ declpos := Ctxt.InnermostPos(n.Pos)
+ vars = append(vars, &dwarf.Var{
+ Name: n.Sym.Name,
+ IsReturnValue: n.Class() == PPARAMOUT,
+ IsInlFormal: n.InlFormal(),
+ Abbrev: abbrev,
+ StackOffset: int32(offs),
+ Type: Ctxt.Lookup(typename),
+ DeclFile: declpos.Base().SymFilename(),
+ DeclLine: declpos.Line(),
+ DeclCol: declpos.Col(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ })
+ }
+ return decls, vars, selected
+}
+
+type varPart struct {
+ varOffset int64
+ slot ssa.SlotID
+}
+
+func createComplexVars(fnsym *obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
+ for _, blockDebug := range debugInfo.Blocks {
+ for _, locList := range blockDebug.Variables {
+ for _, loc := range locList.Locations {
+ if loc.StartProg != nil {
+ loc.StartPC = loc.StartProg.Pc
+ }
+ if loc.EndProg != nil {
+ loc.EndPC = loc.EndProg.Pc
+ } else {
+ loc.EndPC = fnsym.Size
+ }
+ if Debug_locationlist == 0 {
+ loc.EndProg = nil
+ loc.StartProg = nil
+ }
+ }
+ }
+ }
+
+ // Group SSA variables by the user variable they were decomposed from.
+ varParts := map[*Node][]varPart{}
+ ssaVars := make(map[*Node]bool)
+ for slotID, slot := range debugInfo.VarSlots {
+ for slot.SplitOf != nil {
+ slot = slot.SplitOf
+ }
+ n := slot.N.(*Node)
+ ssaVars[n] = true
+ varParts[n] = append(varParts[n], varPart{varOffset(slot), ssa.SlotID(slotID)})
+ }
+
+ // Produce a DWARF variable entry for each user variable.
+ // Don't iterate over the map -- that's nondeterministic, and
+ // createComplexVar has side effects. Instead, go by slot.
+ var decls []*Node
+ var vars []*dwarf.Var
+ for _, slot := range debugInfo.VarSlots {
+ for slot.SplitOf != nil {
+ slot = slot.SplitOf
+ }
+ n := slot.N.(*Node)
+ parts := varParts[n]
+ if parts == nil {
+ continue
+ }
+ // Don't work on this variable again, no matter how many slots it has.
+ delete(varParts, n)
+
+ // Get the order the parts need to be in to represent the memory
+ // of the decomposed user variable.
+ sort.Sort(partsByVarOffset(parts))
+
+ if dvar := createComplexVar(debugInfo, n, parts); dvar != nil {
+ decls = append(decls, n)
+ vars = append(vars, dvar)
+ }
+ }
+
+ return decls, vars, ssaVars
+}
+
+func createDwarfVars(fnsym *obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*Node) ([]*Node, []*dwarf.Var) {
+ // Collect a raw list of DWARF vars.
+ var vars []*dwarf.Var
+ var decls []*Node
+ var selected map[*Node]bool
+ if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && debugInfo != nil {
+ decls, vars, selected = createComplexVars(fnsym, debugInfo, automDecls)
+ } else {
+ decls, vars, selected = createSimpleVars(automDecls)
+ }
+
+ var dcl []*Node
+ var chopVersion bool
+ if fnsym.WasInlined() {
+ dcl, chopVersion = preInliningDcls(fnsym)
+ } else {
+ dcl = automDecls
+ }
+
+ // If optimization is enabled, the list above will typically be
+ // missing some of the original pre-optimization variables in the
+ // function (they may have been promoted to registers, folded into
+ // constants, dead-coded away, etc). Here we add back in entries
+ // for selected missing vars. Note that the recipe below creates a
+ // conservative location. The idea here is that we want to
+ // communicate to the user that "yes, there is a variable named X
+ // in this function, but no, I don't have enough information to
+ // reliably report its contents."
+ for _, n := range dcl {
+ if _, found := selected[n]; found {
+ continue
+ }
+ c := n.Sym.Name[0]
+ if c == '~' || c == '.' || n.Type.IsUntyped() {
+ continue
+ }
+ typename := dwarf.InfoPrefix + typesymname(n.Type)
+ decls = append(decls, n)
+ abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
+ if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ inlIndex := 0
+ if genDwarfInline > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos) + 1
+ }
+ }
+ declpos := Ctxt.InnermostPos(n.Pos)
+ vars = append(vars, &dwarf.Var{
+ Name: n.Sym.Name,
+ IsReturnValue: n.Class() == PPARAMOUT,
+ Abbrev: abbrev,
+ StackOffset: int32(n.Xoffset),
+ Type: Ctxt.Lookup(typename),
+ DeclFile: declpos.Base().SymFilename(),
+ DeclLine: declpos.Line(),
+ DeclCol: declpos.Col(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ })
+ // Append a "deleted auto" entry to the autom list so as to
+ // insure that the type in question is picked up by the linker.
+ // See issue 22941.
gotype := ngotype(n).Linksym()
fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
Asym: Ctxt.Lookup(n.Sym.Name),
- Aoffset: int32(n.Xoffset),
- Name: name,
+ Aoffset: int32(-1),
+ Name: obj.NAME_DELETED_AUTO,
Gotype: gotype,
})
- if n.IsAutoTmp() {
- continue
- }
-
- typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
- dwarfVars = append(dwarfVars, &dwarf.Var{
- Name: n.Sym.Name,
- Abbrev: abbrev,
- Offset: int32(offs),
- Type: Ctxt.Lookup(typename),
- })
-
- var scope ScopeID
- if !n.Name.Captured() && !n.Name.Byval() {
- // n.Pos of captured variables is their first
- // use in the closure but they should always
- // be assigned to scope 0 instead.
- // TODO(mdempsky): Verify this.
- scope = findScope(fn.Func.Marks, n.Pos)
- }
-
- varScopes = append(varScopes, scope)
}
- return assembleScopes(fnsym, fn, dwarfVars, varScopes)
+ // Parameter and local variable names are given middle dot
+ // version numbers as part of the writing them out to export
+ // data (see issue 4326). If DWARF inlined routine generation
+ // is turned on, undo this versioning, since DWARF variables
+ // in question will be parented by the inlined routine and
+ // not the top-level caller.
+ if genDwarfInline > 1 && chopVersion {
+ for _, v := range vars {
+ if v.InlIndex != -1 {
+ if i := strings.Index(v.Name, "·"); i > 0 {
+ v.Name = v.Name[:i] // cut off Vargen
+ }
+ }
+ }
+ }
+
+ return decls, vars
+}
+
+// Given a function that was inlined at some point during the compilation,
+// return a list of nodes corresponding to the autos/locals in that
+// function prior to inlining. Untyped and compiler-synthesized vars are
+// stripped out along the way.
+func preInliningDcls(fnsym *obj.LSym) ([]*Node, bool) {
+ fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
+ imported := false
+ var dcl, rdcl []*Node
+ if fn.Name.Defn != nil {
+ dcl = fn.Func.Inldcl.Slice() // local function
+ } else {
+ dcl = fn.Func.Dcl // imported function
+ imported = true
+ }
+ for _, n := range dcl {
+ c := n.Sym.Name[0]
+ if c == '~' || c == '.' || n.Type.IsUntyped() {
+ continue
+ }
+ rdcl = append(rdcl, n)
+ }
+ return rdcl, imported
+}
+
+// varOffset returns the offset of slot within the user variable it was
+// decomposed from. This has nothing to do with its stack offset.
+func varOffset(slot *ssa.LocalSlot) int64 {
+ offset := slot.Off
+ for ; slot.SplitOf != nil; slot = slot.SplitOf {
+ offset += slot.SplitOffset
+ }
+ return offset
+}
+
+type partsByVarOffset []varPart
+
+func (a partsByVarOffset) Len() int { return len(a) }
+func (a partsByVarOffset) Less(i, j int) bool { return a[i].varOffset < a[j].varOffset }
+func (a partsByVarOffset) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// stackOffset returns the stack location of a LocalSlot relative to the
+// stack pointer, suitable for use in a DWARF location entry. This has nothing
+// to do with its offset in the user variable.
+func stackOffset(slot *ssa.LocalSlot) int32 {
+ n := slot.N.(*Node)
+ var base int64
+ switch n.Class() {
+ case PAUTO:
+ if Ctxt.FixedFrameSize() == 0 {
+ base -= int64(Widthptr)
+ }
+ if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
+ base -= int64(Widthptr)
+ }
+ case PPARAM, PPARAMOUT:
+ base += Ctxt.FixedFrameSize()
+ }
+ return int32(base + n.Xoffset + slot.Off)
+}
+
+// createComplexVar builds a DWARF variable entry and location list representing n.
+func createComplexVar(debugInfo *ssa.FuncDebug, n *Node, parts []varPart) *dwarf.Var {
+ slots := debugInfo.Slots
+ var offs int64 // base stack offset for this kind of variable
+ var abbrev int
+ switch n.Class() {
+ case PAUTO:
+ abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
+ if Ctxt.FixedFrameSize() == 0 {
+ offs -= int64(Widthptr)
+ }
+ if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
+ offs -= int64(Widthptr)
+ }
+
+ case PPARAM, PPARAMOUT:
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ offs += Ctxt.FixedFrameSize()
+ default:
+ return nil
+ }
+
+ gotype := ngotype(n).Linksym()
+ typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
+ inlIndex := 0
+ if genDwarfInline > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos) + 1
+ }
+ }
+ declpos := Ctxt.InnermostPos(n.Pos)
+ dvar := &dwarf.Var{
+ Name: n.Sym.Name,
+ IsReturnValue: n.Class() == PPARAMOUT,
+ IsInlFormal: n.InlFormal(),
+ Abbrev: abbrev,
+ Type: Ctxt.Lookup(typename),
+ // The stack offset is used as a sorting key, so for decomposed
+ // variables just give it the lowest one. It's not used otherwise.
+ // This won't work well if the first slot hasn't been assigned a stack
+ // location, but it's not obvious how to do better.
+ StackOffset: int32(stackOffset(slots[parts[0].slot])),
+ DeclFile: declpos.Base().SymFilename(),
+ DeclLine: declpos.Line(),
+ DeclCol: declpos.Col(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ }
+
+ if Debug_locationlist != 0 {
+ Ctxt.Logf("Building location list for %+v. Parts:\n", n)
+ for _, part := range parts {
+ Ctxt.Logf("\t%v => %v\n", debugInfo.Slots[part.slot], debugInfo.SlotLocsString(part.slot))
+ }
+ }
+
+ // Given a variable that's been decomposed into multiple parts,
+ // its location list may need a new entry after the beginning or
+ // end of every location entry for each of its parts. For example:
+ //
+ // [variable] [pc range]
+ // string.ptr |----|-----| |----|
+ // string.len |------------| |--|
+ // ... needs a location list like:
+ // string |----|-----|-| |--|-|
+ //
+ // Note that location entries may or may not line up with each other,
+ // and some of the result will only have one or the other part.
+ //
+ // To build the resulting list:
+ // - keep a "current" pointer for each part
+ // - find the next transition point
+ // - advance the current pointer for each part up to that transition point
+ // - build the piece for the range between that transition point and the next
+ // - repeat
+
+ type locID struct {
+ block int
+ loc int
+ }
+ findLoc := func(part varPart, id locID) *ssa.VarLoc {
+ if id.block >= len(debugInfo.Blocks) {
+ return nil
+ }
+ return debugInfo.Blocks[id.block].Variables[part.slot].Locations[id.loc]
+ }
+ nextLoc := func(part varPart, id locID) (locID, *ssa.VarLoc) {
+ // Check if there's another loc in this block
+ id.loc++
+ if b := debugInfo.Blocks[id.block]; b != nil && id.loc < len(b.Variables[part.slot].Locations) {
+ return id, findLoc(part, id)
+ }
+ // Find the next block that has a loc for this part.
+ id.loc = 0
+ id.block++
+ for ; id.block < len(debugInfo.Blocks); id.block++ {
+ if b := debugInfo.Blocks[id.block]; b != nil && len(b.Variables[part.slot].Locations) != 0 {
+ return id, findLoc(part, id)
+ }
+ }
+ return id, nil
+ }
+ curLoc := make([]locID, len(slots))
+ // Position each pointer at the first entry for its slot.
+ for _, part := range parts {
+ if b := debugInfo.Blocks[0]; b != nil && len(b.Variables[part.slot].Locations) != 0 {
+ // Block 0 has an entry; no need to advance.
+ continue
+ }
+ curLoc[part.slot], _ = nextLoc(part, curLoc[part.slot])
+ }
+
+ // findBoundaryAfter finds the next beginning or end of a piece after currentPC.
+ findBoundaryAfter := func(currentPC int64) int64 {
+ min := int64(math.MaxInt64)
+ for _, part := range parts {
+ // For each part, find the first PC greater than current. Doesn't
+ // matter if it's a start or an end, since we're looking for any boundary.
+ // If it's the new winner, save it.
+ onePart:
+ for i, loc := curLoc[part.slot], findLoc(part, curLoc[part.slot]); loc != nil; i, loc = nextLoc(part, i) {
+ for _, pc := range [2]int64{loc.StartPC, loc.EndPC} {
+ if pc > currentPC {
+ if pc < min {
+ min = pc
+ }
+ break onePart
+ }
+ }
+ }
+ }
+ return min
+ }
+ var start int64
+ end := findBoundaryAfter(0)
+ for {
+ // Advance to the next chunk.
+ start = end
+ end = findBoundaryAfter(start)
+ if end == math.MaxInt64 {
+ break
+ }
+
+ dloc := dwarf.Location{StartPC: start, EndPC: end}
+ if Debug_locationlist != 0 {
+ Ctxt.Logf("Processing range %x -> %x\n", start, end)
+ }
+
+ // Advance curLoc to the last location that starts before/at start.
+ // After this loop, if there's a location that covers [start, end), it will be current.
+ // Otherwise the current piece will be too early.
+ for _, part := range parts {
+ choice := locID{-1, -1}
+ for i, loc := curLoc[part.slot], findLoc(part, curLoc[part.slot]); loc != nil; i, loc = nextLoc(part, i) {
+ if loc.StartPC > start {
+ break //overshot
+ }
+ choice = i // best yet
+ }
+ if choice.block != -1 {
+ curLoc[part.slot] = choice
+ }
+ if Debug_locationlist != 0 {
+ Ctxt.Logf("\t %v => %v", slots[part.slot], curLoc[part.slot])
+ }
+ }
+ if Debug_locationlist != 0 {
+ Ctxt.Logf("\n")
+ }
+ // Assemble the location list entry for this chunk.
+ present := 0
+ for _, part := range parts {
+ dpiece := dwarf.Piece{
+ Length: slots[part.slot].Type.Size(),
+ }
+ loc := findLoc(part, curLoc[part.slot])
+ if loc == nil || start >= loc.EndPC || end <= loc.StartPC {
+ if Debug_locationlist != 0 {
+ Ctxt.Logf("\t%v: missing", slots[part.slot])
+ }
+ dpiece.Missing = true
+ dloc.Pieces = append(dloc.Pieces, dpiece)
+ continue
+ }
+ present++
+ if Debug_locationlist != 0 {
+ Ctxt.Logf("\t%v: %v", slots[part.slot], debugInfo.Blocks[curLoc[part.slot].block].LocString(loc))
+ }
+ if loc.OnStack {
+ dpiece.OnStack = true
+ dpiece.StackOffset = stackOffset(slots[loc.StackLocation])
+ } else {
+ for reg := 0; reg < len(debugInfo.Registers); reg++ {
+ if loc.Registers&(1< totally missing\n")
+ }
+ continue
+ }
+ // Extend the previous entry if possible.
+ if len(dvar.LocationList) > 0 {
+ prev := &dvar.LocationList[len(dvar.LocationList)-1]
+ if prev.EndPC == dloc.StartPC && len(prev.Pieces) == len(dloc.Pieces) {
+ equal := true
+ for i := range prev.Pieces {
+ if prev.Pieces[i] != dloc.Pieces[i] {
+ equal = false
+ }
+ }
+ if equal {
+ prev.EndPC = end
+ if Debug_locationlist != 0 {
+ Ctxt.Logf("-> merged with previous, now %#v\n", prev)
+ }
+ continue
+ }
+ }
+ }
+ dvar.LocationList = append(dvar.LocationList, dloc)
+ if Debug_locationlist != 0 {
+ Ctxt.Logf("-> added: %#v\n", dloc)
+ }
+ }
+ return dvar
}
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go
index 0ce7a4b11d7..b549f0ea6f2 100644
--- a/src/cmd/compile/internal/gc/phi.go
+++ b/src/cmd/compile/internal/gc/phi.go
@@ -233,24 +233,25 @@ func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *type
// a D-edge, or an edge whose target is in currentRoot's subtree.
continue
}
- if !hasPhi.contains(c.ID) {
- // Add a phi to block c for variable n.
- hasPhi.add(c.ID)
- v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
- // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
- s.s.addNamedValue(var_, v)
- for i := 0; i < len(c.Preds); i++ {
- v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
- }
- if debugPhi {
- fmt.Printf("new phi for var%d in %s: %s\n", n, c, v)
- }
- if !hasDef.contains(c.ID) {
- // There's now a new definition of this variable in block c.
- // Add it to the priority queue to explore.
- heap.Push(priq, c)
- hasDef.add(c.ID)
- }
+ if hasPhi.contains(c.ID) {
+ continue
+ }
+ // Add a phi to block c for variable n.
+ hasPhi.add(c.ID)
+ v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
+ // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
+ s.s.addNamedValue(var_, v)
+ for i := 0; i < len(c.Preds); i++ {
+ v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
+ }
+ if debugPhi {
+ fmt.Printf("new phi for var%d in %s: %s\n", n, c, v)
+ }
+ if !hasDef.contains(c.ID) {
+ // There's now a new definition of this variable in block c.
+ // Add it to the priority queue to explore.
+ heap.Push(priq, c)
+ hasDef.add(c.ID)
}
}
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index ca449b72bdf..49d0229702b 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -306,12 +306,10 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
var n *Node
switch a := v.Aux.(type) {
- case nil, *ssa.ExternSymbol:
+ case nil, *obj.LSym:
// ok, but no node
- case *ssa.ArgSymbol:
- n = a.Node.(*Node)
- case *ssa.AutoSymbol:
- n = a.Node.(*Node)
+ case *Node:
+ n = a
default:
Fatalf("weird aux: %s", v.LongString())
}
@@ -353,110 +351,85 @@ func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
return &lv.be[b.ID]
}
-// NOTE: The bitmap for a specific type t should be cached in t after the first run
-// and then simply copied into bv at the correct offset on future calls with
-// the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, onebitwalktype1
-// accounts for 40% of the 6g execution time.
-func onebitwalktype1(t *types.Type, xoffset *int64, bv bvec) {
- if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
+// NOTE: The bitmap for a specific type t could be cached in t after
+// the first run and then simply copied into bv at the correct offset
+// on future calls with the same type t.
+func onebitwalktype1(t *types.Type, off int64, bv bvec) {
+ if t.Align > 0 && off&int64(t.Align-1) != 0 {
Fatalf("onebitwalktype1: invalid initial alignment, %v", t)
}
switch t.Etype {
- case TINT8,
- TUINT8,
- TINT16,
- TUINT16,
- TINT32,
- TUINT32,
- TINT64,
- TUINT64,
- TINT,
- TUINT,
- TUINTPTR,
- TBOOL,
- TFLOAT32,
- TFLOAT64,
- TCOMPLEX64,
- TCOMPLEX128:
- *xoffset += t.Width
+ case TINT8, TUINT8, TINT16, TUINT16,
+ TINT32, TUINT32, TINT64, TUINT64,
+ TINT, TUINT, TUINTPTR, TBOOL,
+ TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
- case TPTR32,
- TPTR64,
- TUNSAFEPTR,
- TFUNC,
- TCHAN,
- TMAP:
- if *xoffset&int64(Widthptr-1) != 0 {
+ case TPTR32, TPTR64, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
+ if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
- bv.Set(int32(*xoffset / int64(Widthptr))) // pointer
- *xoffset += t.Width
+ bv.Set(int32(off / int64(Widthptr))) // pointer
case TSTRING:
// struct { byte *str; intgo len; }
- if *xoffset&int64(Widthptr-1) != 0 {
+ if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
- bv.Set(int32(*xoffset / int64(Widthptr))) //pointer in first slot
- *xoffset += t.Width
+ bv.Set(int32(off / int64(Widthptr))) //pointer in first slot
case TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
- if *xoffset&int64(Widthptr-1) != 0 {
+ if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
- bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot
- bv.Set(int32(*xoffset/int64(Widthptr) + 1)) // pointer in second slot
- *xoffset += t.Width
+ bv.Set(int32(off / int64(Widthptr))) // pointer in first slot
+ bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
case TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
- if *xoffset&int64(Widthptr-1) != 0 {
+ if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
}
- bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot (BitsPointer)
- *xoffset += t.Width
+ bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
case TARRAY:
+ elt := t.Elem()
+ if elt.Width == 0 {
+ // Short-circuit for #20739.
+ break
+ }
for i := int64(0); i < t.NumElem(); i++ {
- onebitwalktype1(t.Elem(), xoffset, bv)
+ onebitwalktype1(elt, off, bv)
+ off += elt.Width
}
case TSTRUCT:
- var o int64
- for _, t1 := range t.Fields().Slice() {
- fieldoffset := t1.Offset
- *xoffset += fieldoffset - o
- onebitwalktype1(t1.Type, xoffset, bv)
- o = fieldoffset + t1.Type.Width
+ for _, f := range t.Fields().Slice() {
+ onebitwalktype1(f.Type, off+f.Offset, bv)
}
- *xoffset += t.Width - o
-
default:
Fatalf("onebitwalktype1: unexpected type, %v", t)
}
}
-// Returns the number of words of local variables.
-func localswords(lv *Liveness) int32 {
+// localWords returns the number of words of local variables.
+func (lv *Liveness) localWords() int32 {
return int32(lv.stkptrsize / int64(Widthptr))
}
-// Returns the number of words of in and out arguments.
-func argswords(lv *Liveness) int32 {
+// argWords returns the number of words of in and out arguments.
+func (lv *Liveness) argWords() int32 {
return int32(lv.fn.Type.ArgWidth() / int64(Widthptr))
}
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
-func onebitlivepointermap(lv *Liveness, liveout bvec, vars []*Node, args bvec, locals bvec) {
- var xoffset int64
-
+func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
@@ -465,12 +438,10 @@ func onebitlivepointermap(lv *Liveness, liveout bvec, vars []*Node, args bvec, l
node := vars[i]
switch node.Class() {
case PAUTO:
- xoffset = node.Xoffset + lv.stkptrsize
- onebitwalktype1(node.Type, &xoffset, locals)
+ onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
case PPARAM, PPARAMOUT:
- xoffset = node.Xoffset
- onebitwalktype1(node.Type, &xoffset, args)
+ onebitwalktype1(node.Type, node.Xoffset, args)
}
}
}
@@ -484,7 +455,7 @@ func issafepoint(v *ssa.Value) bool {
// Initializes the sets for solving the live variables. Visits all the
// instructions in each basic block to summarizes the information at each basic
// block
-func livenessprologue(lv *Liveness) {
+func (lv *Liveness) prologue() {
lv.initcache()
for _, b := range lv.f.Blocks {
@@ -518,7 +489,7 @@ func livenessprologue(lv *Liveness) {
}
// Solve the liveness dataflow equations.
-func livenesssolve(lv *Liveness) {
+func (lv *Liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
// frees within the loop.
newlivein := bvalloc(int32(len(lv.vars)))
@@ -618,7 +589,7 @@ func livenesssolve(lv *Liveness) {
// Visits all instructions in a basic block and computes a bit vector of live
// variables at each safe point locations.
-func livenessepilogue(lv *Liveness) {
+func (lv *Liveness) epilogue() {
nvars := int32(len(lv.vars))
liveout := bvalloc(nvars)
any := bvalloc(nvars)
@@ -721,7 +692,7 @@ func livenessepilogue(lv *Liveness) {
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
- // walk backward, emit pcdata and populate the maps
+ // walk backward, construct maps at each safe point
index := int32(be.lastbitmapindex)
if index < 0 {
// the first block we encounter should have the ATEXT so
@@ -924,13 +895,7 @@ func clobberWalk(b *ssa.Block, v *Node, offset int64, t *types.Type) {
// clobberPtr generates a clobber of the pointer at offset offset in v.
// The clobber instruction is added at the end of b.
func clobberPtr(b *ssa.Block, v *Node, offset int64) {
- var aux interface{}
- if v.Class() == PAUTO {
- aux = &ssa.AutoSymbol{Node: v}
- } else {
- aux = &ssa.ArgSymbol{Node: v}
- }
- b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, aux)
+ b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
}
func (lv *Liveness) avarinitanyall(b *ssa.Block, any, all bvec) {
@@ -988,7 +953,7 @@ func hashbitmap(h uint32, bv bvec) uint32 {
// is actually a net loss: we save about 50k of argument bitmaps but the new
// PCDATA tables cost about 100k. So for now we keep using a single index for
// both bitmap lists.
-func livenesscompact(lv *Liveness) {
+func (lv *Liveness) compact() {
// Linear probing hash table of bitmaps seen so far.
// The hash table has 4n entries to keep the linear
// scan short. An entry of -1 indicates an empty slot.
@@ -1047,7 +1012,8 @@ Outer:
}
lv.livevars = lv.livevars[:uniq]
- // Rewrite PCDATA instructions to use new numbering.
+ // Record compacted stack map indexes for each value.
+ // These will later become PCDATA instructions.
lv.showlive(nil, lv.livevars[0])
pos := 1
lv.stackMapIndex = make(map[*ssa.Value]int)
@@ -1138,7 +1104,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo
// Prints the computed liveness information and inputs, for debugging.
// This format synthesizes the information used during the multiple passes
// into a single presentation.
-func livenessprintdebug(lv *Liveness) {
+func (lv *Liveness) printDebug() {
fmt.Printf("liveness: %s\n", lv.fn.funcname())
pcdata := 0
@@ -1250,12 +1216,12 @@ func livenessprintdebug(lv *Liveness) {
// first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The
// remaining bytes are the raw bitmaps.
-func livenessemit(lv *Liveness, argssym, livesym *obj.LSym) {
- args := bvalloc(argswords(lv))
+func (lv *Liveness) emit(argssym, livesym *obj.LSym) {
+ args := bvalloc(lv.argWords())
aoff := duint32(argssym, 0, uint32(len(lv.livevars))) // number of bitmaps
aoff = duint32(argssym, aoff, uint32(args.n)) // number of bits in each bitmap
- locals := bvalloc(localswords(lv))
+ locals := bvalloc(lv.localWords())
loff := duint32(livesym, 0, uint32(len(lv.livevars))) // number of bitmaps
loff = duint32(livesym, loff, uint32(locals.n)) // number of bits in each bitmap
@@ -1263,7 +1229,7 @@ func livenessemit(lv *Liveness, argssym, livesym *obj.LSym) {
args.Clear()
locals.Clear()
- onebitlivepointermap(lv, live, lv.vars, args, locals)
+ lv.pointerMap(live, lv.vars, args, locals)
aoff = dbvec(argssym, aoff, args)
loff = dbvec(livesym, loff, locals)
@@ -1288,18 +1254,18 @@ func liveness(e *ssafn, f *ssa.Func) map[*ssa.Value]int {
lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize)
// Run the dataflow framework.
- livenessprologue(lv)
- livenesssolve(lv)
- livenessepilogue(lv)
- livenesscompact(lv)
+ lv.prologue()
+ lv.solve()
+ lv.epilogue()
+ lv.compact()
lv.clobber()
if debuglive >= 2 {
- livenessprintdebug(lv)
+ lv.printDebug()
}
// Emit the live pointer map data structures
if ls := e.curfn.Func.lsym; ls != nil {
- livenessemit(lv, &ls.Func.GCArgs, &ls.Func.GCLocals)
+ lv.emit(&ls.Func.GCArgs, &ls.Func.GCLocals)
}
return lv.stackMapIndex
}
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
index cfb803187cd..4b92ce9e0ed 100644
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -70,11 +70,15 @@ func instrument(fn *Node) {
nodpc := *nodfp
nodpc.Type = types.Types[TUINTPTR]
nodpc.Xoffset = int64(-Widthptr)
+ savedLineno := lineno
+ lineno = src.NoXPos
nd := mkcall("racefuncenter", nil, nil, &nodpc)
+
fn.Func.Enter.Prepend(nd)
nd = mkcall("racefuncexit", nil, nil)
fn.Func.Exit.Append(nd)
fn.Func.Dcl = append(fn.Func.Dcl, &nodpc)
+ lineno = savedLineno
}
if Debug['W'] != 0 {
@@ -140,11 +144,9 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
case OAS, OAS2FUNC:
instrumentnode(&n.Left, init, 1, 0)
instrumentnode(&n.Right, init, 0, 0)
- goto ret
// can't matter
case OCFUNC, OVARKILL, OVARLIVE:
- goto ret
case OBLOCK:
ls := n.List.Slice()
@@ -162,26 +164,25 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
instrumentnode(&ls[i], &ls[i].Ninit, 0, 0)
afterCall = (op == OCALLFUNC || op == OCALLMETH || op == OCALLINTER)
}
- goto ret
case ODEFER:
instrumentnode(&n.Left, init, 0, 0)
- goto ret
case OPROC:
instrumentnode(&n.Left, init, 0, 0)
- goto ret
case OCALLINTER:
instrumentnode(&n.Left, init, 0, 0)
- goto ret
- // Instrument dst argument of runtime.writebarrier* calls
- // as we do not instrument runtime code.
- // typedslicecopy is instrumented in runtime.
case OCALLFUNC:
+ // Note that runtime.typedslicecopy is the only
+ // assignment-like function call in the AST at this
+ // point (between walk and SSA); since we don't
+ // instrument it here, typedslicecopy is manually
+ // instrumented in runtime. Calls to the write barrier
+ // and typedmemmove are created later by SSA, so those
+ // still appear as OAS nodes at this point.
instrumentnode(&n.Left, init, 0, 0)
- goto ret
case ONOT,
OMINUS,
@@ -190,28 +191,23 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
OIMAG,
OCOM:
instrumentnode(&n.Left, init, wr, 0)
- goto ret
case ODOTINTER:
instrumentnode(&n.Left, init, 0, 0)
- goto ret
case ODOT:
instrumentnode(&n.Left, init, 0, 1)
callinstr(&n, init, wr, skip)
- goto ret
case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND
instrumentnode(&n.Left, init, 0, 0)
callinstr(&n, init, wr, skip)
- goto ret
case OIND: // *p
instrumentnode(&n.Left, init, 0, 0)
callinstr(&n, init, wr, skip)
- goto ret
case OSPTR, OLEN, OCAP:
instrumentnode(&n.Left, init, 0, 0)
@@ -223,8 +219,6 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
callinstr(&n1, init, 0, skip)
}
- goto ret
-
case OLSH,
ORSH,
OAND,
@@ -243,7 +237,6 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
OCOMPLEX:
instrumentnode(&n.Left, init, wr, 0)
instrumentnode(&n.Right, init, wr, 0)
- goto ret
case OANDAND, OOROR:
instrumentnode(&n.Left, init, wr, 0)
@@ -254,24 +247,18 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
// so instrumentation goes to n->right->ninit, not init.
instrumentnode(&n.Right, &n.Right.Ninit, wr, 0)
- goto ret
-
case ONAME:
callinstr(&n, init, wr, skip)
- goto ret
case OCONV:
instrumentnode(&n.Left, init, wr, 0)
- goto ret
case OCONVNOP:
instrumentnode(&n.Left, init, wr, 0)
- goto ret
case ODIV, OMOD:
instrumentnode(&n.Left, init, wr, 0)
instrumentnode(&n.Right, init, wr, 0)
- goto ret
case OINDEX:
if !n.Left.Type.IsArray() {
@@ -281,14 +268,13 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
instrumentnode(&n.Left, init, wr, 0)
instrumentnode(&n.Right, init, 0, 0)
- goto ret
+ break
}
instrumentnode(&n.Right, init, 0, 0)
if !n.Left.Type.IsString() {
callinstr(&n, init, wr, skip)
}
- goto ret
case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
instrumentnode(&n.Left, init, 0, 0)
@@ -297,34 +283,26 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
instrumentnode(&high, init, 0, 0)
instrumentnode(&max, init, 0, 0)
n.SetSliceBounds(low, high, max)
- goto ret
case OADDR:
instrumentnode(&n.Left, init, 0, 1)
- goto ret
// n->left is Type* which is not interesting.
case OEFACE:
instrumentnode(&n.Right, init, 0, 0)
- goto ret
-
case OITAB, OIDATA:
instrumentnode(&n.Left, init, 0, 0)
- goto ret
case OSTRARRAYBYTETMP:
instrumentnode(&n.Left, init, 0, 0)
- goto ret
case OAS2DOTTYPE:
instrumentnode(&n.Left, init, 1, 0)
instrumentnode(&n.Right, init, 0, 0)
- goto ret
case ODOTTYPE, ODOTTYPE2:
instrumentnode(&n.Left, init, 0, 0)
- goto ret
// should not appear in AST by now
case OSEND,
@@ -332,7 +310,6 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
OCLOSE,
ONEW,
OXCASE,
- OXFALL,
OCASE,
OPANIC,
ORECOVER,
@@ -377,13 +354,11 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
if n.Right != nil {
instrumentnode(&n.Right, &n.Right.Ninit, 0, 0)
}
- goto ret
case OIF, OSWITCH:
if n.Left != nil {
instrumentnode(&n.Left, &n.Left.Ninit, 0, 0)
}
- goto ret
// just do generic traversal
case OCALLMETH,
@@ -396,7 +371,6 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
OFALL,
OGOTO,
OLABEL:
- goto ret
// does not require instrumentation
case OPRINT, // don't bother instrumenting it
@@ -412,10 +386,8 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
ONONAME,
OLITERAL,
OTYPESW: // ignored by code generation, do not instrument.
- goto ret
}
-ret:
if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
instrumentlist(n.List, init)
}
@@ -462,6 +434,15 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool {
return false
}
t := n.Type
+ // dowidth may not have been called for PEXTERN.
+ dowidth(t)
+ w := t.Width
+ if w == BADWIDTH {
+ Fatalf("instrument: %v badwidth", t)
+ }
+ if w == 0 {
+ return false // can't race on zero-sized things
+ }
if isartificial(n) {
return false
}
@@ -479,9 +460,15 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool {
// that has got a pointer inside. Whether it points to
// the heap or not is impossible to know at compile time
if class == PAUTOHEAP || class == PEXTERN || b.Op == OINDEX || b.Op == ODOTPTR || b.Op == OIND {
- hascalls := 0
- foreach(n, hascallspred, &hascalls)
- if hascalls != 0 {
+ hasCalls := false
+ inspect(n, func(n *Node) bool {
+ switch n.Op {
+ case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
+ hasCalls = true
+ }
+ return !hasCalls
+ })
+ if hasCalls {
n = detachexpr(n, init)
*np = n
}
@@ -494,26 +481,19 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool {
if wr != 0 {
name = "msanwrite"
}
- // dowidth may not have been called for PEXTERN.
- dowidth(t)
- w := t.Width
- if w == BADWIDTH {
- Fatalf("instrument: %v badwidth", t)
- }
f = mkcall(name, nil, init, uintptraddr(n), nodintconst(w))
- } else if flag_race && (t.IsStruct() || t.IsArray()) {
+ } else if flag_race && t.NumComponents() > 1 {
+ // for composite objects we have to write every address
+ // because a write might happen to any subobject.
+ // composites with only one element don't have subobjects, though.
name := "racereadrange"
if wr != 0 {
name = "racewriterange"
}
- // dowidth may not have been called for PEXTERN.
- dowidth(t)
- w := t.Width
- if w == BADWIDTH {
- Fatalf("instrument: %v badwidth", t)
- }
f = mkcall(name, nil, init, uintptraddr(n), nodintconst(w))
} else if flag_race {
+ // for non-composite objects we can write just the start
+ // address, as any write must write the first byte.
name := "raceread"
if wr != 0 {
name = "racewrite"
@@ -552,10 +532,6 @@ func makeaddable(n *Node) {
makeaddable(n.Left)
// nothing to do
- case ODOTPTR:
- fallthrough
- default:
- break
}
}
@@ -580,34 +556,6 @@ func detachexpr(n *Node, init *Nodes) *Node {
return ind
}
-func foreachnode(n *Node, f func(*Node, interface{}), c interface{}) {
- if n != nil {
- f(n, c)
- }
-}
-
-func foreachlist(l Nodes, f func(*Node, interface{}), c interface{}) {
- for _, n := range l.Slice() {
- foreachnode(n, f, c)
- }
-}
-
-func foreach(n *Node, f func(*Node, interface{}), c interface{}) {
- foreachlist(n.Ninit, f, c)
- foreachnode(n.Left, f, c)
- foreachnode(n.Right, f, c)
- foreachlist(n.List, f, c)
- foreachlist(n.Nbody, f, c)
- foreachlist(n.Rlist, f, c)
-}
-
-func hascallspred(n *Node, c interface{}) {
- switch n.Op {
- case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
- (*c.(*int))++
- }
-}
-
// appendinit is like addinit in subr.go
// but appends rather than prepends.
func appendinit(np **Node, init Nodes) {
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index 032601ca3df..db852e83a2b 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -7,19 +7,12 @@ package gc
import (
"cmd/compile/internal/types"
"cmd/internal/objabi"
+ "cmd/internal/sys"
"unicode/utf8"
)
// range
func typecheckrange(n *Node) {
- var toomany int
- var why string
- var t1 *types.Type
- var t2 *types.Type
- var v1 *Node
- var v2 *Node
- var ls []*Node
-
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
@@ -29,15 +22,31 @@ func typecheckrange(n *Node) {
// 2. decldepth++ to denote loop body.
// 3. typecheck body.
// 4. decldepth--.
+ typecheckrangeExpr(n)
+ // second half of dance, the first half being typecheckrangeExpr
+ n.SetTypecheck(1)
+ ls := n.List.Slice()
+ for i1, n1 := range ls {
+ if n1.Typecheck() == 0 {
+ ls[i1] = typecheck(ls[i1], Erv|Easgn)
+ }
+ }
+
+ decldepth++
+ typecheckslice(n.Nbody.Slice(), Etop)
+ decldepth--
+}
+
+func typecheckrangeExpr(n *Node) {
n.Right = typecheck(n.Right, Erv)
t := n.Right.Type
if t == nil {
- goto out
+ return
}
// delicate little dance. see typecheckas2
- ls = n.List.Slice()
+ ls := n.List.Slice()
for i1, n1 := range ls {
if n1.Name == nil || n1.Name.Defn != n {
ls[i1] = typecheck(ls[i1], Erv|Easgn)
@@ -49,11 +58,12 @@ func typecheckrange(n *Node) {
}
n.Type = t
- toomany = 0
+ var t1, t2 *types.Type
+ toomany := false
switch t.Etype {
default:
- yyerror("cannot range over %L", n.Right)
- goto out
+ yyerrorl(n.Pos, "cannot range over %L", n.Right)
+ return
case TARRAY, TSLICE:
t1 = types.Types[TINT]
@@ -65,14 +75,14 @@ func typecheckrange(n *Node) {
case TCHAN:
if !t.ChanDir().CanRecv() {
- yyerror("invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
- goto out
+ yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
+ return
}
t1 = t.Elem()
t2 = nil
if n.List.Len() == 2 {
- toomany = 1
+ toomany = true
}
case TSTRING:
@@ -80,15 +90,14 @@ func typecheckrange(n *Node) {
t2 = types.Runetype
}
- if n.List.Len() > 2 || toomany != 0 {
- yyerror("too many variables in range")
+ if n.List.Len() > 2 || toomany {
+ yyerrorl(n.Pos, "too many variables in range")
}
- v1 = nil
+ var v1, v2 *Node
if n.List.Len() != 0 {
v1 = n.List.First()
}
- v2 = nil
if n.List.Len() > 1 {
v2 = n.List.Second()
}
@@ -104,11 +113,12 @@ func typecheckrange(n *Node) {
v2 = nil
}
+ var why string
if v1 != nil {
if v1.Name != nil && v1.Name.Defn == n {
v1.Type = t1
} else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
- yyerror("cannot assign type %v to %L in range%s", t1, v1, why)
+ yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
}
checkassign(n, v1)
}
@@ -117,24 +127,26 @@ func typecheckrange(n *Node) {
if v2.Name != nil && v2.Name.Defn == n {
v2.Type = t2
} else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
- yyerror("cannot assign type %v to %L in range%s", t2, v2, why)
+ yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
}
checkassign(n, v2)
}
+}
- // second half of dance
-out:
- n.SetTypecheck(1)
- ls = n.List.Slice()
- for i1, n1 := range ls {
- if n1.Typecheck() == 0 {
- ls[i1] = typecheck(ls[i1], Erv|Easgn)
+func cheapComputableIndex(width int64) bool {
+ switch thearch.LinkArch.Family {
+ // MIPS does not have R+R addressing
+ // Arm64 may lack ability to generate this code in our assembler,
+ // but the architecture supports it.
+ case sys.PPC64, sys.S390X:
+ return width == 1
+ case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
+ switch width {
+ case 1, 2, 4, 8:
+ return true
}
}
-
- decldepth++
- typecheckslice(n.Nbody.Slice(), Etop)
- decldepth--
+ return false
}
// walkrange transforms various forms of ORANGE into
@@ -155,27 +167,36 @@ func walkrange(n *Node) *Node {
lno := setlineno(a)
n.Right = nil
- var v1 *Node
- if n.List.Len() != 0 {
+ var v1, v2 *Node
+ l := n.List.Len()
+ if l > 0 {
v1 = n.List.First()
}
- var v2 *Node
- if n.List.Len() > 1 && !isblank(n.List.Second()) {
+
+ if l > 1 {
v2 = n.List.Second()
}
+ if isblank(v2) {
+ v2 = nil
+ }
+
+ if isblank(v1) && v2 == nil {
+ v1 = nil
+ }
+
if v1 == nil && v2 != nil {
Fatalf("walkrange: v2 != nil while v1 == nil")
}
- var ifGuard *Node
-
- translatedLoopOp := OFOR
-
// n.List has no meaning anymore, clear it
// to avoid erroneous processing by racewalk.
n.List.Set(nil)
+ var ifGuard *Node
+
+ translatedLoopOp := OFOR
+
var body []*Node
var init []*Node
switch t.Etype {
@@ -193,65 +214,83 @@ func walkrange(n *Node) *Node {
hv1 := temp(types.Types[TINT])
hn := temp(types.Types[TINT])
- var hp *Node
init = append(init, nod(OAS, hv1, nil))
init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
- if v2 != nil {
- hp = temp(types.NewPtr(n.Type.Elem()))
- tmp := nod(OINDEX, ha, nodintconst(0))
- tmp.SetBounded(true)
- init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
- }
-
n.Left = nod(OLT, hv1, hn)
n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))
- if v1 == nil {
- body = nil
- } else if v2 == nil {
- body = []*Node{nod(OAS, v1, hv1)}
- } else { // for i,a := range thing { body }
- if objabi.Preemptibleloops_enabled != 0 {
- // Doing this transformation makes a bounds check removal less trivial; see #20711
- // TODO enhance the preemption check insertion so that this transformation is not necessary.
- ifGuard = nod(OIF, nil, nil)
- ifGuard.Left = nod(OLT, hv1, hn)
- translatedLoopOp = OFORUNTIL
- }
+ // for range ha { body }
+ if v1 == nil {
+ break
+ }
+
+ // for v1 := range ha { body }
+ if v2 == nil {
+ body = []*Node{nod(OAS, v1, hv1)}
+ break
+ }
+
+ // for v1, v2 := range ha { body }
+ if cheapComputableIndex(n.Type.Elem().Width) {
+ // v1, v2 = hv1, ha[hv1]
+ tmp := nod(OINDEX, ha, hv1)
+ tmp.SetBounded(true)
+ // Use OAS2 to correctly handle assignments
+ // of the form "v1, a[v1] := range".
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1, nod(OIND, hp, nil))
+ a.Rlist.Set2(hv1, tmp)
body = []*Node{a}
-
- // Advance pointer as part of increment.
- // We used to advance the pointer before executing the loop body,
- // but doing so would make the pointer point past the end of the
- // array during the final iteration, possibly causing another unrelated
- // piece of memory not to be garbage collected until the loop finished.
- // Advancing during the increment ensures that the pointer p only points
- // pass the end of the array during the final "p++; i++; if(i >= len(x)) break;",
- // after which p is dead, so it cannot confuse the collector.
- tmp := nod(OADD, hp, nodintconst(t.Elem().Width))
-
- tmp.Type = hp.Type
- tmp.SetTypecheck(1)
- tmp.Right.Type = types.Types[types.Tptr]
- tmp.Right.SetTypecheck(1)
- a = nod(OAS, hp, tmp)
- a = typecheck(a, Etop)
- n.Right.Ninit.Set1(a)
+ break
}
+ if objabi.Preemptibleloops_enabled != 0 {
+ // Doing this transformation makes a bounds check removal less trivial; see #20711
+ // TODO enhance the preemption check insertion so that this transformation is not necessary.
+ ifGuard = nod(OIF, nil, nil)
+ ifGuard.Left = nod(OLT, hv1, hn)
+ translatedLoopOp = OFORUNTIL
+ }
+
+ hp := temp(types.NewPtr(n.Type.Elem()))
+ tmp := nod(OINDEX, ha, nodintconst(0))
+ tmp.SetBounded(true)
+ init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
+
+ // Use OAS2 to correctly handle assignments
+ // of the form "v1, a[v1] := range".
+ a := nod(OAS2, nil, nil)
+ a.List.Set2(v1, v2)
+ a.Rlist.Set2(hv1, nod(OIND, hp, nil))
+ body = append(body, a)
+
+ // Advance pointer as part of increment.
+ // We used to advance the pointer before executing the loop body,
+ // but doing so would make the pointer point past the end of the
+ // array during the final iteration, possibly causing another unrelated
+ // piece of memory not to be garbage collected until the loop finished.
+ // Advancing during the increment ensures that the pointer p only points
+	// past the end of the array during the final "p++; i++; if(i >= len(x)) break;",
+ // after which p is dead, so it cannot confuse the collector.
+ tmp = nod(OADD, hp, nodintconst(t.Elem().Width))
+
+ tmp.Type = hp.Type
+ tmp.SetTypecheck(1)
+ tmp.Right.Type = types.Types[types.Tptr]
+ tmp.Right.SetTypecheck(1)
+ a = nod(OAS, hp, tmp)
+ a = typecheck(a, Etop)
+ n.Right.Ninit.Set1(a)
+
case TMAP:
// orderstmt allocated the iterator for us.
// we only use a once, so no copy needed.
ha := a
- th := hiter(t)
hit := prealloc[n]
- hit.Type = th
+ th := hit.Type
n.Left = nil
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
valsym := th.Field(1).Sym // ditto
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index 47ac5418228..bbb263ee8d3 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -81,6 +81,7 @@ const (
func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{})
+
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
if t.Sym == nil && len(methods(t)) == 0 {
return 0
@@ -95,7 +96,8 @@ func makefield(name string, t *types.Type) *types.Field {
return f
}
-func mapbucket(t *types.Type) *types.Type {
+// bmap makes the map bucket type given the type of the map.
+func bmap(t *types.Type) *types.Type {
if t.MapType().Bucket != nil {
return t.MapType().Bucket
}
@@ -120,11 +122,13 @@ func mapbucket(t *types.Type) *types.Type {
arr = types.NewArray(keytype, BUCKETSIZE)
arr.SetNoalg(true)
- field = append(field, makefield("keys", arr))
+ keys := makefield("keys", arr)
+ field = append(field, keys)
arr = types.NewArray(valtype, BUCKETSIZE)
arr.SetNoalg(true)
- field = append(field, makefield("values", arr))
+ values := makefield("values", arr)
+ field = append(field, values)
// Make sure the overflow pointer is the last memory in the struct,
// because the runtime assumes it can use size-ptrSize as the
@@ -143,7 +147,7 @@ func mapbucket(t *types.Type) *types.Type {
// so if the struct needs 64-bit padding (because a key or value does)
// then it would end with an extra 32-bit padding field.
// Preempt that by emitting the padding here.
- if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr {
+ if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr {
field = append(field, makefield("pad", types.Types[TUINTPTR]))
}
@@ -154,22 +158,65 @@ func mapbucket(t *types.Type) *types.Type {
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
otyp := types.NewPtr(bucket)
- if !types.Haspointers(t.Val()) && !types.Haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
+ if !types.Haspointers(valtype) && !types.Haspointers(keytype) {
otyp = types.Types[TUINTPTR]
}
- ovf := makefield("overflow", otyp)
- field = append(field, ovf)
+ overflow := makefield("overflow", otyp)
+ field = append(field, overflow)
// link up fields
bucket.SetNoalg(true)
- bucket.SetLocal(t.Local())
bucket.SetFields(field[:])
dowidth(bucket)
+ // Check invariants that map code depends on.
+ if !IsComparable(t.Key()) {
+ Fatalf("unsupported map key type for %v", t)
+ }
+ if BUCKETSIZE < 8 {
+ Fatalf("bucket size too small for proper alignment")
+ }
+ if keytype.Align > BUCKETSIZE {
+ Fatalf("key align too big for %v", t)
+ }
+ if valtype.Align > BUCKETSIZE {
+ Fatalf("value align too big for %v", t)
+ }
+ if keytype.Width > MAXKEYSIZE {
+ Fatalf("key size to large for %v", t)
+ }
+ if valtype.Width > MAXVALSIZE {
+ Fatalf("value size to large for %v", t)
+ }
+ if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
+ Fatalf("key indirect incorrect for %v", t)
+ }
+ if t.Val().Width > MAXVALSIZE && !valtype.IsPtr() {
+ Fatalf("value indirect incorrect for %v", t)
+ }
+ if keytype.Width%int64(keytype.Align) != 0 {
+ Fatalf("key size not a multiple of key align for %v", t)
+ }
+ if valtype.Width%int64(valtype.Align) != 0 {
+ Fatalf("value size not a multiple of value align for %v", t)
+ }
+ if bucket.Align%keytype.Align != 0 {
+ Fatalf("bucket align not multiple of key align %v", t)
+ }
+ if bucket.Align%valtype.Align != 0 {
+ Fatalf("bucket align not multiple of value align %v", t)
+ }
+ if keys.Offset%int64(keytype.Align) != 0 {
+ Fatalf("bad alignment of keys in bmap for %v", t)
+ }
+ if values.Offset%int64(valtype.Align) != 0 {
+ Fatalf("bad alignment of values in bmap for %v", t)
+ }
+
// Double-check that overflow field is final memory in struct,
// with no padding at end. See comment above.
- if ovf.Offset != bucket.Width-int64(Widthptr) {
- Fatalf("bad math in mapbucket for %v", t)
+ if overflow.Offset != bucket.Width-int64(Widthptr) {
+ Fatalf("bad offset of overflow in bmap for %v", t)
}
t.MapType().Bucket = bucket
@@ -178,82 +225,114 @@ func mapbucket(t *types.Type) *types.Type {
return bucket
}
-// Builds a type representing a Hmap structure for the given map type.
-// Make sure this stays in sync with ../../../../runtime/hashmap.go!
+// hmap builds a type representing a Hmap structure for the given map type.
+// Make sure this stays in sync with ../../../../runtime/hashmap.go.
func hmap(t *types.Type) *types.Type {
if t.MapType().Hmap != nil {
return t.MapType().Hmap
}
- bucket := mapbucket(t)
+ bmap := bmap(t)
+
+ // build a struct:
+ // type hmap struct {
+ // count int
+ // flags uint8
+ // B uint8
+ // noverflow uint16
+ // hash0 uint32
+ // buckets *bmap
+ // oldbuckets *bmap
+ // nevacuate uintptr
+ // extra unsafe.Pointer // *mapextra
+ // }
+ // must match ../../../../runtime/hashmap.go:hmap.
fields := []*types.Field{
makefield("count", types.Types[TINT]),
makefield("flags", types.Types[TUINT8]),
makefield("B", types.Types[TUINT8]),
makefield("noverflow", types.Types[TUINT16]),
- makefield("hash0", types.Types[TUINT32]),
- makefield("buckets", types.NewPtr(bucket)),
- makefield("oldbuckets", types.NewPtr(bucket)),
+ makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP.
+ makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
+ makefield("oldbuckets", types.NewPtr(bmap)),
makefield("nevacuate", types.Types[TUINTPTR]),
- makefield("overflow", types.Types[TUNSAFEPTR]),
+ makefield("extra", types.Types[TUNSAFEPTR]),
}
- h := types.New(TSTRUCT)
- h.SetNoalg(true)
- h.SetLocal(t.Local())
- h.SetFields(fields)
- dowidth(h)
- t.MapType().Hmap = h
- h.StructType().Map = t
- return h
+ hmap := types.New(TSTRUCT)
+ hmap.SetNoalg(true)
+ hmap.SetFields(fields)
+ dowidth(hmap)
+
+ // The size of hmap should be 48 bytes on 64 bit
+ // and 28 bytes on 32 bit platforms.
+ if size := int64(8 + 5*Widthptr); hmap.Width != size {
+ Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
+ }
+
+ t.MapType().Hmap = hmap
+ hmap.StructType().Map = t
+ return hmap
}
+// hiter builds a type representing an Hiter structure for the given map type.
+// Make sure this stays in sync with ../../../../runtime/hashmap.go.
func hiter(t *types.Type) *types.Type {
if t.MapType().Hiter != nil {
return t.MapType().Hiter
}
+ hmap := hmap(t)
+ bmap := bmap(t)
+
// build a struct:
- // hiter {
- // key *Key
- // val *Value
- // t *MapType
- // h *Hmap
- // buckets *Bucket
- // bptr *Bucket
- // overflow0 unsafe.Pointer
- // overflow1 unsafe.Pointer
+ // type hiter struct {
+ // key *Key
+ // val *Value
+ // t unsafe.Pointer // *MapType
+ // h *hmap
+ // buckets *bmap
+ // bptr *bmap
+ // overflow unsafe.Pointer // *[]*bmap
+ // oldoverflow unsafe.Pointer // *[]*bmap
// startBucket uintptr
- // stuff uintptr
- // bucket uintptr
+ // offset uint8
+ // wrapped bool
+ // B uint8
+ // i uint8
+ // bucket uintptr
// checkBucket uintptr
// }
// must match ../../../../runtime/hashmap.go:hiter.
- var field [12]*types.Field
- field[0] = makefield("key", types.NewPtr(t.Key()))
- field[1] = makefield("val", types.NewPtr(t.Val()))
- field[2] = makefield("t", types.NewPtr(types.Types[TUINT8]))
- field[3] = makefield("h", types.NewPtr(hmap(t)))
- field[4] = makefield("buckets", types.NewPtr(mapbucket(t)))
- field[5] = makefield("bptr", types.NewPtr(mapbucket(t)))
- field[6] = makefield("overflow0", types.Types[TUNSAFEPTR])
- field[7] = makefield("overflow1", types.Types[TUNSAFEPTR])
- field[8] = makefield("startBucket", types.Types[TUINTPTR])
- field[9] = makefield("stuff", types.Types[TUINTPTR]) // offset+wrapped+B+I
- field[10] = makefield("bucket", types.Types[TUINTPTR])
- field[11] = makefield("checkBucket", types.Types[TUINTPTR])
+ fields := []*types.Field{
+ makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
+ makefield("val", types.NewPtr(t.Val())), // Used in range.go for TMAP.
+ makefield("t", types.Types[TUNSAFEPTR]),
+ makefield("h", types.NewPtr(hmap)),
+ makefield("buckets", types.NewPtr(bmap)),
+ makefield("bptr", types.NewPtr(bmap)),
+ makefield("overflow", types.Types[TUNSAFEPTR]),
+ makefield("oldoverflow", types.Types[TUNSAFEPTR]),
+ makefield("startBucket", types.Types[TUINTPTR]),
+ makefield("offset", types.Types[TUINT8]),
+ makefield("wrapped", types.Types[TBOOL]),
+ makefield("B", types.Types[TUINT8]),
+ makefield("i", types.Types[TUINT8]),
+ makefield("bucket", types.Types[TUINTPTR]),
+ makefield("checkBucket", types.Types[TUINTPTR]),
+ }
// build iterator struct holding the above fields
- i := types.New(TSTRUCT)
- i.SetNoalg(true)
- i.SetFields(field[:])
- dowidth(i)
- if i.Width != int64(12*Widthptr) {
- Fatalf("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
+ hiter := types.New(TSTRUCT)
+ hiter.SetNoalg(true)
+ hiter.SetFields(fields)
+ dowidth(hiter)
+ if hiter.Width != int64(12*Widthptr) {
+ Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
}
- t.MapType().Hiter = i
- i.StructType().Map = t
- return i
+ t.MapType().Hiter = hiter
+ hiter.StructType().Map = t
+ return hiter
}
// f is method type, with receiver.
@@ -359,18 +438,18 @@ func methods(t *types.Type) []*Sig {
if !sig.isym.Siggen() {
sig.isym.SetSiggen(true)
if !eqtype(this, it) || this.Width < int64(Widthptr) {
- compiling_wrappers = 1
- genwrapper(it, f, sig.isym, 1)
- compiling_wrappers = 0
+ compiling_wrappers = true
+ genwrapper(it, f, sig.isym, true)
+ compiling_wrappers = false
}
}
if !sig.tsym.Siggen() {
sig.tsym.SetSiggen(true)
if !eqtype(this, t) {
- compiling_wrappers = 1
- genwrapper(t, f, sig.tsym, 0)
- compiling_wrappers = 0
+ compiling_wrappers = true
+ genwrapper(t, f, sig.tsym, false)
+ compiling_wrappers = false
}
}
}
@@ -421,7 +500,7 @@ func imethods(t *types.Type) []*Sig {
isym := methodsym(method, t, false)
if !isym.Siggen() {
isym.SetSiggen(true)
- genwrapper(t, f, isym, 0)
+ genwrapper(t, f, isym, false)
}
}
@@ -492,32 +571,12 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
return dsymptrOff(s, ot, pkg.Pathsym, 0)
}
-// isExportedField reports whether a struct field is exported.
-// It also returns the package to use for PkgPath for an unexported field.
-func isExportedField(ft *types.Field) (bool, *types.Pkg) {
- if ft.Sym != nil && ft.Embedded == 0 {
- return exportname(ft.Sym.Name), ft.Sym.Pkg
- } else {
- if ft.Type.Sym != nil &&
- (ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) {
- return false, ft.Type.Sym.Pkg
- } else {
- return true, nil
- }
- }
-}
-
// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
- var name string
- if ft.Sym != nil {
- name = ft.Sym.Name
+ if !exportname(ft.Sym.Name) && ft.Sym.Pkg != spkg {
+ Fatalf("package mismatch for %v", ft.Sym)
}
- isExported, fpkg := isExportedField(ft)
- if isExported || fpkg == spkg {
- fpkg = nil
- }
- nsym := dname(name, ft.Note, fpkg, isExported)
+ nsym := dname(ft.Sym.Name, ft.Note, nil, exportname(ft.Sym.Name))
return dsymptr(lsym, ot, nsym, 0)
}
@@ -665,7 +724,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
nsym := dname(a.name, "", pkg, exported)
ot = dsymptrOff(lsym, ot, nsym, 0)
- ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype).Linksym())
+ ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
}
@@ -788,7 +847,7 @@ func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
sizeofAlg := 2 * Widthptr
if algarray == nil {
- algarray = Sysfunc("algarray")
+ algarray = sysfunc("algarray")
}
dowidth(t)
alg := algtype(t)
@@ -804,7 +863,7 @@ func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
if t.Sym != nil || methods(tptr) != nil {
sptrWeak = false
}
- sptr = dtypesym(tptr).Linksym()
+ sptr = dtypesym(tptr)
}
gcsym, useGCProg, ptrdata := dgcsym(t)
@@ -901,10 +960,17 @@ func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
return ot
}
+// typeHasNoAlg returns whether t does not have any associated hash/eq
+// algorithms because t, or some component of t, is marked Noalg.
+func typeHasNoAlg(t *types.Type) bool {
+ a, bad := algtype1(t)
+ return a == ANOEQ && bad.Noalg()
+}
+
func typesymname(t *types.Type) string {
name := t.ShortString()
// Use a separate symbol name for Noalg types for #17752.
- if a, bad := algtype1(t); a == ANOEQ && bad.Noalg() {
+ if typeHasNoAlg(t) {
name = "noalg." + name
}
return name
@@ -1079,15 +1145,16 @@ func formalType(t *types.Type) *types.Type {
return t
}
-func dtypesym(t *types.Type) *types.Sym {
+func dtypesym(t *types.Type) *obj.LSym {
t = formalType(t)
if t.IsUntyped() {
Fatalf("dtypesym %v", t)
}
s := typesym(t)
+ lsym := s.Linksym()
if s.Siggen() {
- return s
+ return lsym
}
s.SetSiggen(true)
@@ -1104,21 +1171,18 @@ func dtypesym(t *types.Type) *types.Sym {
dupok = obj.DUPOK
}
- if myimportpath == "runtime" && (tbase == types.Types[tbase.Etype] || tbase == types.Bytetype || tbase == types.Runetype || tbase == types.Errortype) { // int, float, etc
- goto ok
+ if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
+ // named types from other files are defined only by those files
+ if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
+ return lsym
+ }
+ // TODO(mdempsky): Investigate whether this can happen.
+ if isforw[tbase.Etype] {
+ return lsym
+ }
}
- // named types from other files are defined only by those files
- if tbase.Sym != nil && !tbase.Local() {
- return s
- }
- if isforw[tbase.Etype] {
- return s
- }
-
-ok:
ot := 0
- lsym := s.Linksym()
switch t.Etype {
default:
ot = dcommontype(lsym, ot, t)
@@ -1130,8 +1194,8 @@ ok:
t2 := types.NewSlice(t.Elem())
s2 := dtypesym(t2)
ot = dcommontype(lsym, ot, t)
- ot = dsymptr(lsym, ot, s1.Linksym(), 0)
- ot = dsymptr(lsym, ot, s2.Linksym(), 0)
+ ot = dsymptr(lsym, ot, s1, 0)
+ ot = dsymptr(lsym, ot, s2, 0)
ot = duintptr(lsym, ot, uint64(t.NumElem()))
ot = dextratype(lsym, ot, t, 0)
@@ -1139,14 +1203,14 @@ ok:
// ../../../../runtime/type.go:/sliceType
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, ot, t)
- ot = dsymptr(lsym, ot, s1.Linksym(), 0)
+ ot = dsymptr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
case TCHAN:
// ../../../../runtime/type.go:/chanType
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, ot, t)
- ot = dsymptr(lsym, ot, s1.Linksym(), 0)
+ ot = dsymptr(lsym, ot, s1, 0)
ot = duintptr(lsym, ot, uint64(t.ChanDir()))
ot = dextratype(lsym, ot, t, 0)
@@ -1164,8 +1228,8 @@ ok:
}
ot = dcommontype(lsym, ot, t)
- inCount := t.Recvs().NumFields() + t.Params().NumFields()
- outCount := t.Results().NumFields()
+ inCount := t.NumRecvs() + t.NumParams()
+ outCount := t.NumResults()
if isddd {
outCount |= 1 << 15
}
@@ -1175,18 +1239,18 @@ ok:
ot += 4 // align for *rtype
}
- dataAdd := (inCount + t.Results().NumFields()) * Widthptr
+ dataAdd := (inCount + t.NumResults()) * Widthptr
ot = dextratype(lsym, ot, t, dataAdd)
// Array of rtype pointers follows funcType.
for _, t1 := range t.Recvs().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
+ ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
}
for _, t1 := range t.Params().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
+ ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
}
for _, t1 := range t.Results().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
+ ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
}
case TINTER:
@@ -1221,20 +1285,20 @@ ok:
nsym := dname(a.name, "", pkg, exported)
ot = dsymptrOff(lsym, ot, nsym, 0)
- ot = dsymptrOff(lsym, ot, dtypesym(a.type_).Linksym(), 0)
+ ot = dsymptrOff(lsym, ot, dtypesym(a.type_), 0)
}
// ../../../../runtime/type.go:/mapType
case TMAP:
s1 := dtypesym(t.Key())
s2 := dtypesym(t.Val())
- s3 := dtypesym(mapbucket(t))
+ s3 := dtypesym(bmap(t))
s4 := dtypesym(hmap(t))
ot = dcommontype(lsym, ot, t)
- ot = dsymptr(lsym, ot, s1.Linksym(), 0)
- ot = dsymptr(lsym, ot, s2.Linksym(), 0)
- ot = dsymptr(lsym, ot, s3.Linksym(), 0)
- ot = dsymptr(lsym, ot, s4.Linksym(), 0)
+ ot = dsymptr(lsym, ot, s1, 0)
+ ot = dsymptr(lsym, ot, s2, 0)
+ ot = dsymptr(lsym, ot, s3, 0)
+ ot = dsymptr(lsym, ot, s4, 0)
if t.Key().Width > MAXKEYSIZE {
ot = duint8(lsym, ot, uint8(Widthptr))
ot = duint8(lsym, ot, 1) // indirect
@@ -1251,7 +1315,7 @@ ok:
ot = duint8(lsym, ot, 0) // not indirect
}
- ot = duint16(lsym, ot, uint16(mapbucket(t).Width))
+ ot = duint16(lsym, ot, uint16(bmap(t).Width))
ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
ot = dextratype(lsym, ot, t, 0)
@@ -1269,12 +1333,13 @@ ok:
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, ot, t)
- ot = dsymptr(lsym, ot, s1.Linksym(), 0)
+ ot = dsymptr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
// ../../../../runtime/type.go:/structType
// for security, only the exported fields.
case TSTRUCT:
+ fields := t.Fields().Slice()
// omitFieldForAwfulBoringCryptoKludge reports whether
// the field t should be omitted from the reflect data.
@@ -1294,46 +1359,44 @@ ok:
}
return strings.HasPrefix(path, "crypto/")
}
-
- n := 0
-
- for _, t1 := range t.Fields().Slice() {
- if omitFieldForAwfulBoringCryptoKludge(t1) {
- continue
+ newFields := fields[:0:0]
+ for _, t1 := range fields {
+ if !omitFieldForAwfulBoringCryptoKludge(t1) {
+ newFields = append(newFields, t1)
}
+ }
+ fields = newFields
+
+ for _, t1 := range fields {
dtypesym(t1.Type)
- n++
}
- ot = dcommontype(lsym, ot, t)
- pkg := localpkg
- if t.Sym != nil {
- pkg = t.Sym.Pkg
- } else {
- // Unnamed type. Grab the package from the first field, if any.
- for _, f := range t.Fields().Slice() {
- if f.Embedded != 0 {
- continue
- }
- pkg = f.Sym.Pkg
+ // All non-exported struct field names within a struct
+ // type must originate from a single package. By
+ // identifying and recording that package within the
+ // struct type descriptor, we can omit that
+ // information from the field descriptors.
+ var spkg *types.Pkg
+ for _, f := range fields {
+ if !exportname(f.Sym.Name) {
+ spkg = f.Sym.Pkg
break
}
}
- ot = dgopkgpath(lsym, ot, pkg)
- ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
- ot = duintptr(lsym, ot, uint64(n))
- ot = duintptr(lsym, ot, uint64(n))
- dataAdd := n * structfieldSize()
+ ot = dcommontype(lsym, ot, t)
+ ot = dgopkgpath(lsym, ot, spkg)
+ ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
+ ot = duintptr(lsym, ot, uint64(len(fields)))
+ ot = duintptr(lsym, ot, uint64(len(fields)))
+
+ dataAdd := len(fields) * structfieldSize()
ot = dextratype(lsym, ot, t, dataAdd)
- for _, f := range t.Fields().Slice() {
- if omitFieldForAwfulBoringCryptoKludge(f) {
- continue
- }
+ for _, f := range fields {
// ../../../../runtime/type.go:/structField
- ot = dnameField(lsym, ot, pkg, f)
- ot = dsymptr(lsym, ot, dtypesym(f.Type).Linksym(), 0)
+ ot = dnameField(lsym, ot, spkg, f)
+ ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
@@ -1365,9 +1428,13 @@ ok:
keep = true
}
}
+ // Do not put Noalg types in typelinks. See issue #22605.
+ if typeHasNoAlg(t) {
+ keep = false
+ }
lsym.Set(obj.AttrMakeTypelink, keep)
- return s
+ return lsym
}
// for each itabEntry, gather the methods on
@@ -1434,7 +1501,7 @@ func itabsym(it *obj.LSym, offset int64) *obj.LSym {
}
// keep this arithmetic in sync with *itab layout
- methodnum := int((offset - 3*int64(Widthptr) - 8) / int64(Widthptr))
+ methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
if methodnum >= len(syms) {
return nil
}
@@ -1483,23 +1550,19 @@ func dumptabs() {
// type itab struct {
// inter *interfacetype
// _type *_type
- // link *itab
// hash uint32
- // bad bool
- // inhash bool
- // unused [2]byte
+ // _ [4]byte
// fun [1]uintptr // variable sized
// }
- o := dsymptr(i.lsym, 0, dtypesym(i.itype).Linksym(), 0)
- o = dsymptr(i.lsym, o, dtypesym(i.t).Linksym(), 0)
- o += Widthptr // skip link field
- o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
- o += 4 // skip bad/inhash/unused fields
- o += len(imethods(i.itype)) * Widthptr // skip fun method pointers
- // at runtime the itab will contain pointers to types, other itabs and
- // method functions. None are allocated on heap, so we can use obj.NOPTR.
- ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.NOPTR))
-
+ o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
+ o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
+ o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
+ o += 4 // skip unused field
+ for _, fn := range genfun(i.t, i.itype) {
+ o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
+ }
+ // Nothing writes static itabs, so they are read only.
+ ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
dsymptr(ilink, 0, i.lsym, 0)
ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
@@ -1518,7 +1581,7 @@ func dumptabs() {
// }
nsym := dname(p.s.Name, "", nil, true)
ot = dsymptrOff(s, ot, nsym, 0)
- ot = dsymptrOff(s, ot, dtypesym(p.t).Linksym(), 0)
+ ot = dsymptrOff(s, ot, dtypesym(p.t), 0)
}
ggloblsym(s, int32(ot), int16(obj.RODATA))
@@ -1612,8 +1675,8 @@ func dalgsym(t *types.Type) *obj.LSym {
s.SetAlgGen(true)
if memhashvarlen == nil {
- memhashvarlen = Sysfunc("memhash_varlen")
- memequalvarlen = Sysfunc("memequal_varlen")
+ memhashvarlen = sysfunc("memhash_varlen")
+ memequalvarlen = sysfunc("memequal_varlen")
}
// make hash closure
@@ -1743,8 +1806,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) {
}
vec := bvalloc(8 * int32(len(ptrmask)))
- xoffset := int64(0)
- onebitwalktype1(t, &xoffset, vec)
+ onebitwalktype1(t, 0, vec)
nptr := typeptrdata(t) / int64(Widthptr)
for i := int64(0); i < nptr; i++ {
diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go
index b0bc7f69086..ebdaa19994a 100644
--- a/src/cmd/compile/internal/gc/scope.go
+++ b/src/cmd/compile/internal/gc/scope.go
@@ -168,7 +168,7 @@ func (v varsByScopeAndOffset) Less(i, j int) bool {
if v.scopes[i] != v.scopes[j] {
return v.scopes[i] < v.scopes[j]
}
- return v.vars[i].Offset < v.vars[j].Offset
+ return v.vars[i].StackOffset < v.vars[j].StackOffset
}
func (v varsByScopeAndOffset) Swap(i, j int) {
diff --git a/src/cmd/compile/internal/gc/scope_test.go b/src/cmd/compile/internal/gc/scope_test.go
index 9113afe279b..5d44b7a4f4f 100644
--- a/src/cmd/compile/internal/gc/scope_test.go
+++ b/src/cmd/compile/internal/gc/scope_test.go
@@ -173,6 +173,18 @@ var testfile = []testline{
{line: " fi(p)", scopes: []int{1}},
{line: " }"},
{line: "}"},
+ {line: "func TestCaptureVar(flag bool) func() int {"},
+ {line: " a := 1", vars: []string{"arg flag bool", "arg ~r1 func() int", "var a int"}},
+ {line: " if flag {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var b int", "var f func() int"}},
+ {line: " f := func() int {", scopes: []int{1, 0}},
+ {line: " return b + 1"},
+ {line: " }"},
+ {line: " return f", scopes: []int{1}},
+ {line: " }"},
+ {line: " f1(a)"},
+ {line: " return nil"},
+ {line: "}"},
{line: "func main() {"},
{line: " TestNestedFor()"},
{line: " TestOas2()"},
@@ -184,6 +196,7 @@ var testfile = []testline{
{line: " TestDiscontiguousRanges()"},
{line: " TestClosureScope()"},
{line: " TestEscape()"},
+ {line: " TestCaptureVar(true)"},
{line: "}"},
}
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
index 320cd9a47ef..38eaaccfd27 100644
--- a/src/cmd/compile/internal/gc/select.go
+++ b/src/cmd/compile/internal/gc/select.go
@@ -8,39 +8,32 @@ import "cmd/compile/internal/types"
// select
func typecheckselect(sel *Node) {
- var ncase *Node
- var n *Node
-
var def *Node
lno := setlineno(sel)
- count := 0
typecheckslice(sel.Ninit.Slice(), Etop)
- for _, n1 := range sel.List.Slice() {
- count++
- ncase = n1
- setlineno(ncase)
+ for _, ncase := range sel.List.Slice() {
if ncase.Op != OXCASE {
+ setlineno(ncase)
Fatalf("typecheckselect %v", ncase.Op)
}
if ncase.List.Len() == 0 {
// default
if def != nil {
- yyerror("multiple defaults in select (first at %v)", def.Line())
+ yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line())
} else {
def = ncase
}
} else if ncase.List.Len() > 1 {
- yyerror("select cases cannot be lists")
+ yyerrorl(ncase.Pos, "select cases cannot be lists")
} else {
ncase.List.SetFirst(typecheck(ncase.List.First(), Etop))
- n = ncase.List.First()
+ n := ncase.List.First()
ncase.Left = n
ncase.List.Set(nil)
- setlineno(n)
switch n.Op {
default:
- yyerror("select case must be receive, send or assign recv")
+ yyerrorl(n.Pos, "select case must be receive, send or assign recv")
// convert x = <-c into OSELRECV(x, <-c).
// remove implicit conversions; the eventual assignment
@@ -51,7 +44,7 @@ func typecheckselect(sel *Node) {
}
if n.Right.Op != ORECV {
- yyerror("select assignment must have receive on right hand side")
+ yyerrorl(n.Pos, "select assignment must have receive on right hand side")
break
}
@@ -60,7 +53,7 @@ func typecheckselect(sel *Node) {
// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
case OAS2RECV:
if n.Rlist.First().Op != ORECV {
- yyerror("select assignment must have receive on right hand side")
+ yyerrorl(n.Pos, "select assignment must have receive on right hand side")
break
}
@@ -72,7 +65,7 @@ func typecheckselect(sel *Node) {
// convert <-c into OSELRECV(N, <-c)
case ORECV:
- n = nod(OSELRECV, nil, n)
+ n = nodl(n.Pos, OSELRECV, nil, n)
n.SetTypecheck(1)
ncase.Left = n
@@ -85,35 +78,41 @@ func typecheckselect(sel *Node) {
typecheckslice(ncase.Nbody.Slice(), Etop)
}
- sel.Xoffset = int64(count)
lineno = lno
}
func walkselect(sel *Node) {
- if sel.List.Len() == 0 && sel.Xoffset != 0 {
- Fatalf("double walkselect") // already rewrote
+ lno := setlineno(sel)
+ if sel.Nbody.Len() != 0 {
+ Fatalf("double walkselect")
}
- lno := setlineno(sel)
- i := sel.List.Len()
+ init := sel.Ninit.Slice()
+ sel.Ninit.Set(nil)
+
+ init = append(init, walkselectcases(&sel.List)...)
+ sel.List.Set(nil)
+
+ sel.Nbody.Set(init)
+ walkstmtlist(sel.Nbody.Slice())
+
+ lineno = lno
+}
+
+func walkselectcases(cases *Nodes) []*Node {
+ n := cases.Len()
+ sellineno := lineno
// optimization: zero-case select
- var init []*Node
- var r *Node
- var n *Node
- var var_ *Node
- var selv *Node
- var chosen *Node
- if i == 0 {
- sel.Nbody.Set1(mkcall("block", nil, nil))
- goto out
+ if n == 0 {
+ return []*Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
// TODO(rsc): Reenable optimization once order.go can handle it.
// golang.org/issue/7672.
- if i == 1 {
- cas := sel.List.First()
+ if n == 1 {
+ cas := cases.First()
setlineno(cas)
l := cas.Ninit.Slice()
if cas.Left != nil { // not default:
@@ -163,21 +162,19 @@ func walkselect(sel *Node) {
a.Nbody.Set1(mkcall("block", nil, &ln))
l = ln.Slice()
a = typecheck(a, Etop)
- l = append(l, a)
- l = append(l, n)
+ l = append(l, a, n)
}
l = append(l, cas.Nbody.Slice()...)
l = append(l, nod(OBREAK, nil, nil))
- sel.Nbody.Set(l)
- goto out
+ return l
}
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
- for _, cas := range sel.List.Slice() {
+ for _, cas := range cases.Slice() {
setlineno(cas)
- n = cas.Left
+ n := cas.Left
if n == nil {
continue
}
@@ -205,15 +202,15 @@ func walkselect(sel *Node) {
}
// optimization: two-case select but one is default: single non-blocking op.
- if i == 2 && (sel.List.First().Left == nil || sel.List.Second().Left == nil) {
+ if n == 2 && (cases.First().Left == nil || cases.Second().Left == nil) {
var cas *Node
var dflt *Node
- if sel.List.First().Left == nil {
- cas = sel.List.Second()
- dflt = sel.List.First()
+ if cases.First().Left == nil {
+ cas = cases.Second()
+ dflt = cases.First()
} else {
- dflt = sel.List.Second()
- cas = sel.List.First()
+ dflt = cases.Second()
+ cas = cases.First()
}
n := cas.Left
@@ -247,26 +244,24 @@ func walkselect(sel *Node) {
r.Left = typecheck(r.Left, Erv)
r.Nbody.Set(cas.Nbody.Slice())
r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
- sel.Nbody.Set2(r, nod(OBREAK, nil, nil))
- goto out
+ return []*Node{r, nod(OBREAK, nil, nil)}
}
- init = sel.Ninit.Slice()
- sel.Ninit.Set(nil)
+ var init []*Node
// generate sel-struct
- setlineno(sel)
- selv = temp(selecttype(sel.Xoffset))
- r = nod(OAS, selv, nil)
+ lineno = sellineno
+ selv := temp(selecttype(int64(n)))
+ r := nod(OAS, selv, nil)
r = typecheck(r, Etop)
init = append(init, r)
- var_ = conv(conv(nod(OADDR, selv, nil), types.Types[TUNSAFEPTR]), types.NewPtr(types.Types[TUINT8]))
- r = mkcall("newselect", nil, nil, var_, nodintconst(selv.Type.Width), nodintconst(sel.Xoffset))
+ var_ := conv(conv(nod(OADDR, selv, nil), types.Types[TUNSAFEPTR]), types.NewPtr(types.Types[TUINT8]))
+ r = mkcall("newselect", nil, nil, var_, nodintconst(selv.Type.Width), nodintconst(int64(n)))
r = typecheck(r, Etop)
init = append(init, r)
// register cases
- for _, cas := range sel.List.Slice() {
+ for _, cas := range cases.Slice() {
setlineno(cas)
init = append(init, cas.Ninit.Slice()...)
@@ -298,8 +293,8 @@ func walkselect(sel *Node) {
}
// run the select
- setlineno(sel)
- chosen = temp(types.Types[TINT])
+ lineno = sellineno
+ chosen := temp(types.Types[TINT])
r = nod(OAS, chosen, mkcall("selectgo", types.Types[TINT], nil, var_))
r = typecheck(r, Etop)
init = append(init, r)
@@ -308,7 +303,7 @@ func walkselect(sel *Node) {
init = append(init, nod(OVARKILL, selv, nil))
// dispatch cases
- for i, cas := range sel.List.Slice() {
+ for i, cas := range cases.Slice() {
setlineno(cas)
cond := nod(OEQ, chosen, nodintconst(int64(i)))
@@ -320,12 +315,7 @@ func walkselect(sel *Node) {
init = append(init, r)
}
- sel.Nbody.Set(init)
-
-out:
- sel.List.Set(nil)
- walkstmtlist(sel.Nbody.Slice())
- lineno = lno
+ return init
}
// Keep in sync with src/runtime/select.go.
@@ -342,7 +332,6 @@ func selecttype(size int64) *types.Type {
namedfield("releasetime", types.Types[TUINT64]),
})
scase.SetNoalg(true)
- scase.SetLocal(true)
sel := tostruct([]*Node{
namedfield("tcase", types.Types[TUINT16]),
@@ -354,7 +343,6 @@ func selecttype(size int64) *types.Type {
namedfield("pollorderarr", types.NewArray(types.Types[TUINT16], size)),
})
sel.SetNoalg(true)
- sel.SetLocal(true)
return sel
}
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index 613cdf6e74f..3af2460a802 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -44,7 +44,7 @@ func init1(n *Node, out *[]*Node) {
init1(n1, out)
}
- if n.Left != nil && n.Type != nil && n.Left.Op == OTYPE && n.Class() == PFUNC {
+ if n.isMethodExpression() {
// Methods called as Type.Method(receiver, ...).
// Definitions for method expressions are stored in type->nname.
init1(asNode(n.Type.FuncType().Nname), out)
@@ -157,7 +157,6 @@ func init1(n *Node, out *[]*Node) {
initlist = initlist[:last]
n.SetInitorder(InitDone)
- return
}
// foundinitloop prints an init loop error and exits.
@@ -214,10 +213,10 @@ func init2(n *Node, out *[]*Node) {
init2list(n.Rlist, out)
init2list(n.Nbody, out)
- if n.Op == OCLOSURE {
+ switch n.Op {
+ case OCLOSURE:
init2list(n.Func.Closure.Nbody, out)
- }
- if n.Op == ODOTMETH || n.Op == OCALLPART {
+ case ODOTMETH, OCALLPART:
init2(asNode(n.Type.FuncType().Nname), out)
}
}
@@ -229,8 +228,7 @@ func init2list(l Nodes, out *[]*Node) {
}
func initreorder(l []*Node, out *[]*Node) {
- var n *Node
- for _, n = range l {
+ for _, n := range l {
switch n.Op {
case ODCLFUNC, ODCLCONST, ODCLTYPE:
continue
@@ -480,9 +478,8 @@ func staticassign(l *Node, r *Node, out *[]*Node) bool {
n := *l
gdata(&n, r.Func.Closure.Func.Nname, Widthptr)
return true
- } else {
- closuredebugruntimecheck(r)
}
+ closuredebugruntimecheck(r)
case OCONVIFACE:
// This logic is mirrored in isStaticCompositeLiteral.
@@ -885,11 +882,10 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
// put dynamics into array (5)
var index int64
- for _, r := range n.List.Slice() {
- value := r
- if r.Op == OKEY {
- index = nonnegintconst(r.Left)
- value = r.Right
+ for _, value := range n.List.Slice() {
+ if value.Op == OKEY {
+ index = nonnegintconst(value.Left)
+ value = value.Right
}
a := nod(OINDEX, vauto, nodintconst(index))
a.SetBounded(true)
@@ -932,6 +928,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
func maplit(n *Node, m *Node, init *Nodes) {
// make the map var
a := nod(OMAKE, nil, nil)
+ a.Esc = n.Esc
a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len())))
litas(m, a, init)
@@ -941,7 +938,7 @@ func maplit(n *Node, m *Node, init *Nodes) {
if r.Op != OKEY {
Fatalf("maplit: rhs not OKEY: %v", r)
}
- if isliteral(r.Left) && isliteral(r.Right) {
+ if isStaticCompositeLiteral(r.Left) && isStaticCompositeLiteral(r.Right) {
stat = append(stat, r)
} else {
dyn = append(dyn, r)
@@ -966,24 +963,14 @@ func maplit(n *Node, m *Node, init *Nodes) {
vstatv := staticname(tv)
vstatv.Name.SetReadonly(true)
- for i, r := range stat {
- index := r.Left
- value := r.Right
-
- // build vstatk[b] = index
- setlineno(index)
- lhs := nod(OINDEX, vstatk, nodintconst(int64(i)))
- as := nod(OAS, lhs, index)
- as = typecheck(as, Etop)
- genAsStatic(as)
-
- // build vstatv[b] = value
- setlineno(value)
- lhs = nod(OINDEX, vstatv, nodintconst(int64(i)))
- as = nod(OAS, lhs, value)
- as = typecheck(as, Etop)
- genAsStatic(as)
+ datak := nod(OARRAYLIT, nil, nil)
+ datav := nod(OARRAYLIT, nil, nil)
+ for _, r := range stat {
+ datak.List.Append(r.Left)
+ datav.List.Append(r.Right)
}
+ fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
+ fixedlit(inInitFunction, initKindStatic, datav, vstatv, init)
// loop adding structure elements to map
// for i = 0; i < len(vstatk); i++ {
diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/gc/sizeof_test.go
index 1ca0a615353..48d357a0b03 100644
--- a/src/cmd/compile/internal/gc/sizeof_test.go
+++ b/src/cmd/compile/internal/gc/sizeof_test.go
@@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Func{}, 124, 216},
+ {Func{}, 132, 232},
{Name{}, 36, 56},
{Param{}, 28, 56},
{Node{}, 76, 128},
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 63e9622983c..fe062da4095 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -37,6 +37,7 @@ func initssaconfig() {
Float32: types.Types[TFLOAT32],
Float64: types.Types[TFLOAT64],
Int: types.Types[TINT],
+ UInt: types.Types[TUINT],
Uintptr: types.Types[TUINTPTR],
String: types.Types[TSTRING],
BytePtr: types.NewPtr(types.Types[TUINT8]),
@@ -48,6 +49,11 @@ func initssaconfig() {
Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
}
+
+ if thearch.SoftFloat {
+ softfloatInit()
+ }
+
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
_ = types.NewPtr(types.Types[TINTER]) // *interface{}
@@ -67,35 +73,37 @@ func initssaconfig() {
if thearch.LinkArch.Name == "386" {
ssaConfig.Set387(thearch.Use387)
}
+ ssaConfig.SoftFloat = thearch.SoftFloat
ssaCaches = make([]ssa.Cache, nBackendWorkers)
// Set up some runtime functions we'll need to call.
- Newproc = Sysfunc("newproc")
- Deferproc = Sysfunc("deferproc")
- Deferreturn = Sysfunc("deferreturn")
- Duffcopy = Sysfunc("duffcopy")
- Duffzero = Sysfunc("duffzero")
- panicindex = Sysfunc("panicindex")
- panicslice = Sysfunc("panicslice")
- panicdivide = Sysfunc("panicdivide")
- growslice = Sysfunc("growslice")
- panicdottypeE = Sysfunc("panicdottypeE")
- panicdottypeI = Sysfunc("panicdottypeI")
- panicnildottype = Sysfunc("panicnildottype")
- assertE2I = Sysfunc("assertE2I")
- assertE2I2 = Sysfunc("assertE2I2")
- assertI2I = Sysfunc("assertI2I")
- assertI2I2 = Sysfunc("assertI2I2")
- goschedguarded = Sysfunc("goschedguarded")
- writeBarrier = Sysfunc("writeBarrier")
- writebarrierptr = Sysfunc("writebarrierptr")
- typedmemmove = Sysfunc("typedmemmove")
- typedmemclr = Sysfunc("typedmemclr")
- Udiv = Sysfunc("udiv")
+ Newproc = sysfunc("newproc")
+ Deferproc = sysfunc("deferproc")
+ Deferreturn = sysfunc("deferreturn")
+ Duffcopy = sysfunc("duffcopy")
+ Duffzero = sysfunc("duffzero")
+ panicindex = sysfunc("panicindex")
+ panicslice = sysfunc("panicslice")
+ panicdivide = sysfunc("panicdivide")
+ growslice = sysfunc("growslice")
+ panicdottypeE = sysfunc("panicdottypeE")
+ panicdottypeI = sysfunc("panicdottypeI")
+ panicnildottype = sysfunc("panicnildottype")
+ assertE2I = sysfunc("assertE2I")
+ assertE2I2 = sysfunc("assertE2I2")
+ assertI2I = sysfunc("assertI2I")
+ assertI2I2 = sysfunc("assertI2I2")
+ goschedguarded = sysfunc("goschedguarded")
+ writeBarrier = sysfunc("writeBarrier")
+ writebarrierptr = sysfunc("writebarrierptr")
+ gcWriteBarrier = sysfunc("gcWriteBarrier")
+ typedmemmove = sysfunc("typedmemmove")
+ typedmemclr = sysfunc("typedmemclr")
+ Udiv = sysfunc("udiv")
// GO386=387 runtime functions
- ControlWord64trunc = Sysfunc("controlWord64trunc")
- ControlWord32 = Sysfunc("controlWord32")
+ ControlWord64trunc = sysfunc("controlWord64trunc")
+ ControlWord32 = sysfunc("controlWord32")
}
// buildssa builds an SSA function for fn.
@@ -135,13 +143,9 @@ func buildssa(fn *Node, worker int) *ssa.Func {
if fn.Func.Pragma&Nosplit != 0 {
s.f.NoSplit = true
}
- defer func() {
- if s.f.WBPos.IsKnown() {
- fn.Func.WBPos = s.f.WBPos
- }
- }()
s.exitCode = fn.Func.Exit
s.panics = map[funcLine]*ssa.Block{}
+ s.softFloat = s.config.SoftFloat
if name == os.Getenv("GOSSAFUNC") {
s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
@@ -162,15 +166,12 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.startBlock(s.f.Entry)
s.vars[&memVar] = s.startmem
- s.varsyms = map[*Node]interface{}{}
-
// Generate addresses of local declarations
s.decladdrs = map[*Node]*ssa.Value{}
for _, n := range fn.Func.Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT:
- aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
- s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sp)
+ s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), n, s.sp)
if n.Class() == PPARAMOUT && s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
@@ -185,7 +186,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
case PFUNC:
// local function - already handled by frontend
default:
- s.Fatalf("local variable with class %s unimplemented", classnames[n.Class()])
+ s.Fatalf("local variable with class %v unimplemented", n.Class())
}
}
@@ -207,6 +208,12 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.popLine()
}
+ for _, b := range s.f.Blocks {
+ if b.Pos != src.NoXPos {
+ s.updateUnsetPredPos(b)
+ }
+ }
+
s.insertPhis()
// Don't carry reference this around longer than necessary
@@ -217,6 +224,39 @@ func buildssa(fn *Node, worker int) *ssa.Func {
return s.f
}
+// updateUnsetPredPos propagates the earliest-value position information for b
+// towards all of b's predecessors that need a position, and recurs on that
+// predecessor if its position is updated. B should have a non-empty position.
+func (s *state) updateUnsetPredPos(b *ssa.Block) {
+ if b.Pos == src.NoXPos {
+ s.Fatalf("Block %s should have a position", b)
+ }
+ bestPos := src.NoXPos
+ for _, e := range b.Preds {
+ p := e.Block()
+ if !p.LackingPos() {
+ continue
+ }
+ if bestPos == src.NoXPos {
+ bestPos = b.Pos
+ for _, v := range b.Values {
+ if v.LackingPos() {
+ continue
+ }
+ if v.Pos != src.NoXPos {
+ // Assume values are still in roughly textual order;
+ // TODO: could also seek minimum position?
+ bestPos = v.Pos
+ break
+ }
+ }
+ }
+ p.Pos = bestPos
+ s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
+ }
+ return
+}
+
type state struct {
// configuration (arch) information
config *ssa.Config
@@ -258,9 +298,6 @@ type state struct {
// addresses of PPARAM and PPARAMOUT variables.
decladdrs map[*Node]*ssa.Value
- // symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
- varsyms map[*Node]interface{}
-
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sp *ssa.Value
@@ -268,6 +305,8 @@ type state struct {
// line number stack. The current line number is top of stack
line []src.XPos
+ // the last line number processed; it may have been popped
+ lastPos src.XPos
// list of panic calls by function name and line number.
// Used to deduplicate panic calls.
@@ -278,6 +317,7 @@ type state struct {
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
+ softFloat bool
}
type funcLine struct {
@@ -349,7 +389,14 @@ func (s *state) endBlock() *ssa.Block {
s.defvars[b.ID] = s.vars
s.curBlock = nil
s.vars = nil
- b.Pos = s.peekPos()
+ if b.LackingPos() {
+ // Empty plain blocks get the line of their successor (handled after all blocks created),
+ // except for increment blocks in For statements (handled in ssa conversion of OFOR),
+ // and for blocks ending in GOTO/BREAK/CONTINUE.
+ b.Pos = src.NoXPos
+ } else {
+ b.Pos = s.lastPos
+ }
return b
}
@@ -362,7 +409,10 @@ func (s *state) pushLine(line src.XPos) {
if Debug['K'] != 0 {
Warn("buildssa: unknown position (line 0)")
}
+ } else {
+ s.lastPos = line
}
+
s.line = append(s.line, line)
}
@@ -443,27 +493,27 @@ func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
- return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
+ return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
}
// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
- return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
+ return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
}
// entryNewValue1 adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
- return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
+ return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
- return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
+ return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
}
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
- return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
+ return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
}
// const* routines add a new const value to the entry block.
@@ -511,6 +561,25 @@ func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}
+// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
+// soft-float runtime function instead (when emitting soft-float code).
+func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ if s.softFloat {
+ if c, ok := s.sfcall(op, arg); ok {
+ return c
+ }
+ }
+ return s.newValue1(op, t, arg)
+}
+func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ if s.softFloat {
+ if c, ok := s.sfcall(op, arg0, arg1); ok {
+ return c
+ }
+ }
+ return s.newValue2(op, t, arg0, arg1)
+}
+
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
for _, n := range l.Slice() {
@@ -520,8 +589,11 @@ func (s *state) stmtList(l Nodes) {
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
- s.pushLine(n.Pos)
- defer s.popLine()
+ if !(n.Op == OVARKILL || n.Op == OVARLIVE) {
+ // OVARKILL and OVARLIVE are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
+ s.pushLine(n.Pos)
+ defer s.popLine()
+ }
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
@@ -634,6 +706,7 @@ func (s *state) stmt(n *Node) {
}
b := s.endBlock()
+ b.Pos = s.lastPos // Do this even if b is an empty block.
b.AddEdgeTo(lab.target)
case OAS:
@@ -661,24 +734,26 @@ func (s *state) stmt(n *Node) {
}
rhs = nil
case OAPPEND:
- // If we're writing the result of an append back to the same slice,
- // handle it specially to avoid write barriers on the fast (non-growth) path.
+ // Check whether we're writing the result of an append back to the same slice.
+ // If so, we handle it specially to avoid write barriers on the fast
+ // (non-growth) path.
+ if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 {
+ break
+ }
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
- if samesafeexpr(n.Left, rhs.List.First()) {
- if !s.canSSA(n.Left) {
- if Debug_append > 0 {
- Warnl(n.Pos, "append: len-only update")
- }
- s.append(rhs, true)
- return
- } else {
- if Debug_append > 0 { // replicating old diagnostic message
- Warnl(n.Pos, "append: len-only update (in local slice)")
- }
+ if s.canSSA(n.Left) {
+ if Debug_append > 0 { // replicating old diagnostic message
+ Warnl(n.Pos, "append: len-only update (in local slice)")
}
+ break
}
+ if Debug_append > 0 {
+ Warnl(n.Pos, "append: len-only update")
+ }
+ s.append(rhs, true)
+ return
}
}
@@ -778,12 +853,14 @@ func (s *state) stmt(n *Node) {
case ORETURN:
s.stmtList(n.List)
- s.exit()
+ b := s.exit()
+ b.Pos = s.lastPos
+
case ORETJMP:
s.stmtList(n.List)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
- b.Aux = n.Left.Sym.Linksym()
+ b.Aux = n.Sym.Linksym()
case OCONTINUE, OBREAK:
var to *ssa.Block
@@ -808,6 +885,7 @@ func (s *state) stmt(n *Node) {
}
b := s.endBlock()
+ b.Pos = s.lastPos // Do this even if b is an empty block.
b.AddEdgeTo(to)
case OFOR, OFORUNTIL:
@@ -873,6 +951,11 @@ func (s *state) stmt(n *Node) {
}
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
+ // It can happen that bIncr ends in a block containing only VARKILL,
+ // and that muddles the debugging experience.
+ if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
+ b.Pos = bCond.Pos
+ }
}
if n.Op == OFORUNTIL {
@@ -934,6 +1017,11 @@ func (s *state) stmt(n *Node) {
if !n.Left.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
}
+ switch n.Left.Class() {
+ case PAUTO, PPARAM, PPARAMOUT:
+ default:
+ s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
+ }
s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
case OCHECKNIL:
@@ -1408,14 +1496,13 @@ func (s *state) expr(n *Node) *ssa.Value {
len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
case OCFUNC:
- aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Left.Sym.Linksym()})
+ aux := n.Left.Sym.Linksym()
return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
case ONAME:
if n.Class() == PFUNC {
// "value" of a function is the address of the function's closure
sym := funcsym(n.Sym).Linksym()
- aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: sym})
- return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sb)
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
}
if s.canSSA(n) {
return s.variable(n, n.Type)
@@ -1525,6 +1612,12 @@ func (s *state) expr(n *Node) *ssa.Value {
return v
}
+ // map <--> *hmap
+ if to.Etype == TMAP && from.IsPtr() &&
+ to.MapType().Hmap == from.Elem() {
+ return v
+ }
+
dowidth(from)
dowidth(to)
if from.Width != to.Width {
@@ -1623,18 +1716,18 @@ func (s *state) expr(n *Node) *ssa.Value {
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
- if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS {
+ if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
- if thearch.LinkArch.Family == sys.ARM64 {
+ if thearch.LinkArch.Family == sys.ARM64 || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
- if thearch.LinkArch.Family == sys.MIPS {
+ if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
@@ -1665,12 +1758,12 @@ func (s *state) expr(n *Node) *ssa.Value {
if op2 == ssa.OpCopy {
return x
}
- return s.newValue1(op2, n.Type, x)
+ return s.newValueOrSfCall1(op2, n.Type, x)
}
if op2 == ssa.OpCopy {
- return s.newValue1(op1, n.Type, x)
+ return s.newValueOrSfCall1(op1, n.Type, x)
}
- return s.newValue1(op2, n.Type, s.newValue1(op1, types.Types[it], x))
+ return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
@@ -1715,8 +1808,8 @@ func (s *state) expr(n *Node) *ssa.Value {
ftp := floatForComplex(ft)
ttp := floatForComplex(tt)
return s.newValue2(ssa.OpComplexMake, tt,
- s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
- s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
@@ -1733,8 +1826,8 @@ func (s *state) expr(n *Node) *ssa.Value {
if n.Left.Type.IsComplex() {
pt := floatForComplex(n.Left.Type)
op := s.ssaOp(OEQ, pt)
- r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
- i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+ r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+ i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
switch n.Op {
case OEQ:
@@ -1745,6 +1838,9 @@ func (s *state) expr(n *Node) *ssa.Value {
s.Fatalf("ordered complex compare %v", n.Op)
}
}
+ if n.Left.Type.IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
+ }
return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
case OMUL:
a := s.expr(n.Left)
@@ -1762,22 +1858,27 @@ func (s *state) expr(n *Node) *ssa.Value {
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
- areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
- breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
- aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
- bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
+ areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+ breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+ aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+ bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
}
- xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
- ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))
+ xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+ ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
if pt != wt { // Narrow to store back
- xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
- ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
+ xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+ ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
}
+
+ if n.Type.IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ }
+
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case ODIV:
@@ -1800,31 +1901,31 @@ func (s *state) expr(n *Node) *ssa.Value {
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
- areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
- breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
- aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
- bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
+ areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+ breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+ aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+ bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
}
- denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
- xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
- ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))
+ denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
+ xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+ ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
// TODO not sure if this is best done in wide precision or narrow
// Double-rounding might be an issue.
// Note that the pre-SSA implementation does the entire calculation
// in wide format, so wide is compatible.
- xreal = s.newValue2(divop, wt, xreal, denom)
- ximag = s.newValue2(divop, wt, ximag, denom)
+ xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
+ ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
if pt != wt { // Narrow to store back
- xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
- ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
+ xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+ ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
}
if n.Type.IsFloat() {
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
}
return s.intDivide(n, a, b)
case OMOD:
@@ -1838,8 +1939,11 @@ func (s *state) expr(n *Node) *ssa.Value {
pt := floatForComplex(n.Type)
op := s.ssaOp(n.Op, pt)
return s.newValue2(ssa.OpComplexMake, n.Type,
- s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
- s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
+ s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
+ s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
+ }
+ if n.Type.IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OAND, OOR, OXOR:
@@ -2191,7 +2295,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
if inplace {
- if sn.Op == ONAME {
+ if sn.Op == ONAME && sn.Class() != PEXTERN {
// Tell liveness we're about to build a new slice
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
@@ -2270,7 +2374,8 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
- if cond.Op == OANDAND {
+ switch cond.Op {
+ case OANDAND:
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, mid, no, max8(likely, 0))
@@ -2283,8 +2388,7 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
- }
- if cond.Op == OOROR {
+ case OOROR:
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, yes, mid, min8(likely, 0))
@@ -2294,8 +2398,7 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
- }
- if cond.Op == ONOT {
+ case ONOT:
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, no, yes, -likely)
return
@@ -2398,7 +2501,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
}
// Left is not ssa-able. Compute its address.
addr := s.addr(left, false)
- if left.Op == ONAME && skip == 0 {
+ if left.Op == ONAME && left.Class() != PEXTERN && skip == 0 {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem())
}
if isReflectHeaderDataField(left) {
@@ -2499,6 +2602,79 @@ const (
callGo
)
+type sfRtCallDef struct {
+ rtfn *obj.LSym
+ rtype types.EType
+}
+
+var softFloatOps map[ssa.Op]sfRtCallDef
+
+func softfloatInit() {
+ // Some of these operations get transformed by sfcall.
+ softFloatOps = map[ssa.Op]sfRtCallDef{
+ ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
+ ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
+ ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
+ ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
+ ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
+ ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
+ ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
+ ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},
+
+ ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
+ ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
+ ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
+ ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
+ ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
+ ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
+ ssa.OpGreater64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
+ ssa.OpGreater32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
+ ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL},
+ ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL},
+ ssa.OpGeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL},
+ ssa.OpGeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL},
+
+ ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
+ ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32},
+ ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
+ ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64},
+ ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
+ ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
+ ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
+ ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32},
+ ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
+ ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64},
+ ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
+ ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
+ ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
+ ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
+ }
+}
+
+// TODO: do not emit sfcall if operation can be optimized to constant in later
+// opt phase
+func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
+ if callDef, ok := softFloatOps[op]; ok {
+ switch op {
+ case ssa.OpLess32F,
+ ssa.OpLess64F,
+ ssa.OpLeq32F,
+ ssa.OpLeq64F:
+ args[0], args[1] = args[1], args[0]
+ case ssa.OpSub32F,
+ ssa.OpSub64F:
+ args[1] = s.newValue1(s.ssaOp(OMINUS, types.Types[callDef.rtype]), args[1].Type, args[1])
+ }
+
+ result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
+ if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
+ result = s.newValue1(ssa.OpNot, result.Type, result)
+ }
+ return result, true
+ }
+ return nil, false
+}
+
var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
@@ -2577,18 +2753,34 @@ func init() {
return nil
},
all...)
+ add("runtime", "getclosureptr",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ addF("runtime", "getcallerpc",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
+ }, sys.AMD64, sys.I386)
+
+ add("runtime", "getcallersp",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
+ },
+ all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
},
- sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
},
- sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
@@ -2607,41 +2799,40 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
-
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Store",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Store64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -2649,14 +2840,14 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -2664,14 +2855,14 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -2679,14 +2870,14 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
- sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -2724,6 +2915,85 @@ func init() {
return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
+ addF("math", "Trunc",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
+ },
+ sys.PPC64, sys.S390X)
+ addF("math", "Ceil",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
+ },
+ sys.PPC64, sys.S390X)
+ addF("math", "Floor",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
+ },
+ sys.PPC64, sys.S390X)
+ addF("math", "Round",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
+ },
+ sys.S390X)
+ addF("math", "RoundToEven",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
+ },
+ sys.S390X)
+ addF("math", "Abs",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
+ },
+ sys.PPC64)
+ addF("math", "Copysign",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
+ },
+ sys.PPC64)
+
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ aux := syslook("support_sse41").Sym.Linksym()
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb)
+ v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem())
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ a := s.call(n, callNormal)
+ s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TFLOAT64], a, s.mem())
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[TFLOAT64])
+ }
+ }
+ addF("math", "RoundToEven",
+ makeRoundAMD64(ssa.OpRoundToEven),
+ sys.AMD64)
+ addF("math", "Floor",
+ makeRoundAMD64(ssa.OpFloor),
+ sys.AMD64)
+ addF("math", "Ceil",
+ makeRoundAMD64(ssa.OpCeil),
+ sys.AMD64)
+ addF("math", "Trunc",
+ makeRoundAMD64(ssa.OpTrunc),
+ sys.AMD64)
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
@@ -2847,7 +3117,7 @@ func init() {
sys.ARM64)
makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: syslook("support_popcnt").Sym.Linksym()})
+ aux := syslook("support_popcnt").Sym.Linksym()
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb)
v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem())
b := s.endBlock()
@@ -2975,6 +3245,12 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder {
// We can't intrinsify them.
return nil
}
+ // Skip intrinsifying math functions (which may contain hard-float
+ // instructions) when soft-float
+ if thearch.SoftFloat && pkg == "math" {
+ return nil
+ }
+
fn := sym.Name
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
@@ -3026,7 +3302,7 @@ func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
temps := map[*Node]*ssa.Value{}
for _, a := range n.List.Slice() {
if a.Op != OAS {
- s.Fatalf("non-assignment as a function argument %s", opnames[a.Op])
+ s.Fatalf("non-assignment as a function argument %v", a.Op)
}
l, r := a.Left, a.Right
switch l.Op {
@@ -3046,7 +3322,7 @@ func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
}
args = append(args, callArg{l.Xoffset, v})
default:
- s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op])
+ s.Fatalf("function argument assignment target not allowed: %v", l.Op)
}
}
sort.Sort(byOffset(args))
@@ -3099,10 +3375,8 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
}
i := s.expr(fn.Left)
itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
- if k != callNormal {
- s.nilCheck(itab)
- }
- itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
+ s.nilCheck(itab)
+ itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
if k == callNormal {
codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem())
@@ -3199,24 +3473,6 @@ func etypesign(e types.EType) int8 {
return 0
}
-// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
-// This improves the effectiveness of cse by using the same Aux values for the
-// same symbols.
-func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
- switch sym.(type) {
- default:
- s.Fatalf("sym %v is of unknown type %T", sym, sym)
- case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
- // these are the only valid types
- }
-
- if lsym, ok := s.varsyms[n]; ok {
- return lsym
- }
- s.varsyms[n] = sym
- return sym
-}
-
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
// If bounded is true then this address does not require a nil check for its operand
@@ -3228,8 +3484,7 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value {
switch n.Class() {
case PEXTERN:
// global variable
- aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Sym.Linksym()})
- v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
+ v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if n.Xoffset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
@@ -3243,21 +3498,18 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value {
}
if n == nodfp {
// Special arg that points to the frame pointer (Used by ORECOVER).
- aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
- return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
+ return s.entryNewValue1A(ssa.OpAddr, t, n, s.sp)
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
case PAUTO:
- aux := s.lookupSymbol(n, &ssa.AutoSymbol{Node: n})
- return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
+ return s.newValue1A(ssa.OpAddr, t, n, s.sp)
case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
- aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
- return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
+ return s.newValue1A(ssa.OpAddr, t, n, s.sp)
default:
- s.Fatalf("variable address class %v not implemented", classnames[n.Class()])
+ s.Fatalf("variable address class %v not implemented", n.Class())
return nil
}
case OINDREGSP:
@@ -3342,7 +3594,7 @@ func (s *state) canSSA(n *Node) bool {
return false
case PPARAMOUT:
if s.hasdefer {
- // TODO: handle this case? Named return values must be
+ // TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
// Or maybe not, see issue 18860. Even unnamed return values
@@ -3610,8 +3862,9 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsSlice():
- ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
+ elType := types.NewPtr(t.Elem())
+ ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, elType, left, ptr, s.mem())
case t.IsInterface():
// itab field is treated as a scalar.
idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
@@ -3743,7 +3996,7 @@ type u642fcvtTab struct {
one func(*state, *types.Type, int64) *ssa.Value
}
-var u64_f64 u642fcvtTab = u642fcvtTab{
+var u64_f64 = u642fcvtTab{
geq: ssa.OpGeq64,
cvt2F: ssa.OpCvt64to64F,
and: ssa.OpAnd64,
@@ -3753,7 +4006,7 @@ var u64_f64 u642fcvtTab = u642fcvtTab{
one: (*state).constInt64,
}
-var u64_f32 u642fcvtTab = u642fcvtTab{
+var u64_f32 = u642fcvtTab{
geq: ssa.OpGeq64,
cvt2F: ssa.OpCvt64to32F,
and: ssa.OpAnd64,
@@ -3834,12 +4087,12 @@ type u322fcvtTab struct {
cvtI2F, cvtF2F ssa.Op
}
-var u32_f64 u322fcvtTab = u322fcvtTab{
+var u32_f64 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to64F,
cvtF2F: ssa.OpCopy,
}
-var u32_f32 u322fcvtTab = u322fcvtTab{
+var u32_f32 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to32F,
cvtF2F: ssa.OpCvt64Fto32F,
}
@@ -3924,14 +4177,15 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- if n.Op == OLEN {
+ switch n.Op {
+ case OLEN:
// length is stored in the first word for map/chan
s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
- } else if n.Op == OCAP {
+ case OCAP:
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
- } else {
+ default:
s.Fatalf("op must be OLEN or OCAP")
}
s.endBlock()
@@ -3948,7 +4202,7 @@ type f2uCvtTab struct {
cutoff uint64
}
-var f32_u64 f2uCvtTab = f2uCvtTab{
+var f32_u64 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto64,
subf: ssa.OpSub32F,
@@ -3958,7 +4212,7 @@ var f32_u64 f2uCvtTab = f2uCvtTab{
cutoff: 9223372036854775808,
}
-var f64_u64 f2uCvtTab = f2uCvtTab{
+var f64_u64 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto64,
subf: ssa.OpSub64F,
@@ -3968,7 +4222,7 @@ var f64_u64 f2uCvtTab = f2uCvtTab{
cutoff: 9223372036854775808,
}
-var f32_u32 f2uCvtTab = f2uCvtTab{
+var f32_u32 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto32,
subf: ssa.OpSub32F,
@@ -3978,7 +4232,7 @@ var f32_u32 f2uCvtTab = f2uCvtTab{
cutoff: 2147483648,
}
-var f64_u32 f2uCvtTab = f2uCvtTab{
+var f64_u32 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto32,
subf: ssa.OpSub64F,
@@ -4377,23 +4631,20 @@ func genssa(f *ssa.Func, pp *Progs) {
e := f.Frontend().(*ssafn)
- // Generate GC bitmaps, except if the stack is too large,
- // in which compilation will fail later anyway (issue 20529).
- if e.stksize < maxStackSize {
- s.stackMapIndex = liveness(e, f)
- }
+ s.stackMapIndex = liveness(e, f)
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
s.pp = pp
- var valueProgs map[*obj.Prog]*ssa.Value
- var blockProgs map[*obj.Prog]*ssa.Block
+ var progToValue map[*obj.Prog]*ssa.Value
+ var progToBlock map[*obj.Prog]*ssa.Block
+ var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
var logProgs = e.log
if logProgs {
- valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
- blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
+ progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
+ progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
- blockProgs[s.pp.next] = f.Blocks[0]
+ progToBlock[s.pp.next] = f.Blocks[0]
}
if thearch.Use387 {
@@ -4402,6 +4653,12 @@ func genssa(f *ssa.Func, pp *Progs) {
s.ScratchFpMem = e.scratchFpMem
+ logLocationLists := Debug_locationlist != 0
+ if Ctxt.Flag_locationlists {
+ e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(f, logLocationLists)
+ valueToProgAfter = make([]*obj.Prog, f.NumValues())
+ }
+
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.next
@@ -4442,15 +4699,19 @@ func genssa(f *ssa.Func, pp *Progs) {
}
case ssa.OpPhi:
CheckLoweredPhi(v)
-
+ case ssa.OpRegKill:
+ // nothing to do
default:
// let the backend handle it
thearch.SSAGenValue(&s, v)
}
+ if Ctxt.Flag_locationlists {
+ valueToProgAfter[v.ID] = s.pp.next
+ }
if logProgs {
for ; x != s.pp.next; x = x.Link {
- valueProgs[x] = v
+ progToValue[x] = v
}
}
}
@@ -4468,7 +4729,47 @@ func genssa(f *ssa.Func, pp *Progs) {
thearch.SSAGenBlock(&s, b, next)
if logProgs {
for ; x != s.pp.next; x = x.Link {
- blockProgs[x] = b
+ progToBlock[x] = b
+ }
+ }
+ }
+
+ if Ctxt.Flag_locationlists {
+ for i := range f.Blocks {
+ blockDebug := e.curfn.Func.DebugInfo.Blocks[i]
+ for _, locList := range blockDebug.Variables {
+ for _, loc := range locList.Locations {
+ if loc.Start == ssa.BlockStart {
+ loc.StartProg = s.bstart[f.Blocks[i].ID]
+ } else {
+ loc.StartProg = valueToProgAfter[loc.Start.ID]
+ }
+ if loc.End == nil {
+ Fatalf("empty loc %v compiling %v", loc, f.Name)
+ }
+
+ if loc.End == ssa.BlockEnd {
+ // If this variable was live at the end of the block, it should be
+ // live over the control flow instructions. Extend it up to the
+ // beginning of the next block.
+ // If this is the last block, then there's no Prog to use for it, and
+ // EndProg is unset.
+ if i < len(f.Blocks)-1 {
+ loc.EndProg = s.bstart[f.Blocks[i+1].ID]
+ }
+ } else {
+ // Advance the "end" forward by one; the end-of-range doesn't take effect
+ // until the instruction actually executes.
+ loc.EndProg = valueToProgAfter[loc.End.ID].Link
+ if loc.EndProg == nil {
+ Fatalf("nil loc.EndProg compiling %v, loc=%v", f.Name, loc)
+ }
+ }
+ if !logLocationLists {
+ loc.Start = nil
+ loc.End = nil
+ }
+ }
}
}
}
@@ -4479,16 +4780,22 @@ func genssa(f *ssa.Func, pp *Progs) {
}
if logProgs {
+ filename := ""
for p := pp.Text; p != nil; p = p.Link {
+ if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+ filename = p.InnermostFilename()
+ f.Logf("# %s\n", filename)
+ }
+
var s string
- if v, ok := valueProgs[p]; ok {
+ if v, ok := progToValue[p]; ok {
s = v.String()
- } else if b, ok := blockProgs[p]; ok {
+ } else if b, ok := progToBlock[p]; ok {
s = b.String()
} else {
s = " " // most value and branch strings are 2-3 characters long
}
- f.Logf("%s\t%s\n", s, p)
+ f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
}
if f.HTMLWriter != nil {
// LineHist is defunct now - this code won't do
@@ -4499,22 +4806,31 @@ func genssa(f *ssa.Func, pp *Progs) {
var buf bytes.Buffer
buf.WriteString("")
buf.WriteString("
")
+ filename := ""
for p := pp.Text; p != nil; p = p.Link {
+ // Don't spam every line with the file name, which is often huge.
+ // Only print changes, and "unknown" is not a change.
+ if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+ filename = p.InnermostFilename()
+ buf.WriteString("
")
- if v, ok := valueProgs[p]; ok {
+ if v, ok := progToValue[p]; ok {
buf.WriteString(v.HTML())
- } else if b, ok := blockProgs[p]; ok {
- buf.WriteString(b.HTML())
+ } else if b, ok := progToBlock[p]; ok {
+ buf.WriteString("" + b.HTML() + "")
}
buf.WriteString("