[dev.link] all: merge branch 'master' into dev.link

Clean merge.

Change-Id: If9bfb0f27f41563fd5d386de9c1081542c3ce498

commit 24f9238b95 (506 changed files with 12164 additions and 17264 deletions)
@@ -51,6 +51,38 @@ TODO

 <h3 id="go-command">Go command</h3>

+<!-- golang.org/issue/33848 -->
+<p>
+When the main module contains a top-level <code>vendor</code> directory and
+its <code>go.mod</code> file specifies <code>go</code> <code>1.14</code> or
+higher, the <code>go</code> command now defaults to <code>-mod=vendor</code>
+for operations that accept that flag. A new value for that flag,
+<code>-mod=mod</code>, causes the <code>go</code> command to instead load
+modules from the module cache (as when no <code>vendor</code> directory is
+present).
+</p>
+
+<p>
+When <code>-mod=vendor</code> is set (explicitly or by default), the
+<code>go</code> command now verifies that the main module's
+<code>vendor/modules.txt</code> file is consistent with its
+<code>go.mod</code> file.
+</p>
+
+<p>
+<code>go</code> <code>list</code> <code>-m</code> no longer silently omits
+transitive dependencies that do not provide packages in
+the <code>vendor</code> directory. It now fails explicitly if
+<code>-mod=vendor</code> is set.
+</p>
+
+<p><!-- golang.org/issue/32502, golang.org/issue/30345 -->
+The <code>go</code> <code>get</code> command no longer accepts
+the <code>-mod</code> flag. Previously, the flag's setting either
+<a href="https://golang.org/issue/30345">was ignored</a> or
+<a href="https://golang.org/issue/32502">caused the build to fail</a>.
+</p>
+
 <p><!-- golang.org/issue/30748 -->
 The <code>go</code> command now includes snippets of plain-text error messages
 from module proxies and other HTTP servers.

@@ -58,13 +90,6 @@ TODO
 graphic characters and spaces.
 </p>

-<p><!-- golang.org/issue/32502, golang.org/issue/30345 -->
-The <code>go</code> <code>get</code> subcommand no longer accepts
-the <code>-mod</code> flag. Previously, the flag's setting either
-<a href="https://golang.org/issue/30345">was ignored</a> or
-<a href="https://golang.org/issue/32502">caused the build to fail</a>.
-</p>
-
 <h2 id="runtime">Runtime</h2>

 <p>
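As a rough illustration of the new default (the module path and dependency below are invented, not taken from the patch), a main module like the following, with its dependencies copied under a top-level vendor/ directory by "go mod vendor", is now built as if -mod=vendor had been passed, while "go build -mod=mod" restores the old module-cache behavior:

	module example.com/hello // hypothetical module path

	go 1.14

	require golang.org/x/text v0.3.2 // hypothetical dependency, copied under ./vendor
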
@@ -58,6 +58,7 @@ func Test27660(t *testing.T) { test27660(t) }
 func Test28896(t *testing.T) { test28896(t) }
 func Test30065(t *testing.T) { test30065(t) }
 func Test32579(t *testing.T) { test32579(t) }
+func Test31891(t *testing.T) { test31891(t) }
 func TestAlign(t *testing.T) { testAlign(t) }
 func TestAtol(t *testing.T) { testAtol(t) }
 func TestBlocking(t *testing.T) { testBlocking(t) }
misc/cgo/test/issue31891.c (new file, 13 lines)

@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "_cgo_export.h"
+
+void callIssue31891() {
+	Issue31891A a;
+	useIssue31891A(&a);
+
+	Issue31891B b;
+	useIssue31891B(&b);
+}
@@ -29,7 +29,7 @@ func Test(t *testing.T) {
 	// Brittle: the assertion may fail spuriously when the algorithm
 	// changes, but should remain stable otherwise.
 	got := fmt.Sprintf("%T %T", in, opts)
-	want := "issue9026._Ctype_struct___0 *issue9026._Ctype_struct___0"
+	want := "issue9026._Ctype_struct___0 *issue9026._Ctype_struct___1"
 	if got != want {
 		t.Errorf("Non-deterministic type names: got %s, want %s", got, want)
 	}
@@ -108,6 +108,17 @@ void callMulti(void);
 // issue 28772 part 2 - part 1 in issuex.go
 #define issue28772Constant2 2

+// issue 31891
+typedef struct {
+	long obj;
+} Issue31891A;
+
+typedef struct {
+	long obj;
+} Issue31891B;
+
+void callIssue31891(void);

 */
 import "C"
@@ -517,3 +528,15 @@ func test20910(t *testing.T) {
 // issue 28772 part 2

 const issue28772Constant2 = C.issue28772Constant2
+
+// issue 31891
+
+//export useIssue31891A
+func useIssue31891A(c *C.Issue31891A) {}
+
+//export useIssue31891B
+func useIssue31891B(c *C.Issue31891B) {}
+
+func test31891(t *testing.T) {
+	C.callIssue31891()
+}
121
misc/nacl/README
121
misc/nacl/README
|
|
@ -1,121 +0,0 @@
|
||||||
Native Client
|
|
||||||
=============
|
|
||||||
|
|
||||||
This document outlines the basics of building and developing the Go runtime and
|
|
||||||
programs in the Native Client (NaCl) environment.
|
|
||||||
|
|
||||||
Go 1.3 supports three architectures
|
|
||||||
|
|
||||||
* nacl/386 which is standard 386.
|
|
||||||
* nacl/amd64p32 which is a 64 bit architecture, where the address space is
|
|
||||||
limited to a 4gb window.
|
|
||||||
* nacl/arm which is 32-bit ARMv7A architecture with 1GB address space.
|
|
||||||
|
|
||||||
For background it is recommended that you read https://golang.org/s/go13nacl.
|
|
||||||
|
|
||||||
Prerequisites
|
|
||||||
-------------
|
|
||||||
|
|
||||||
Native Client programs are executed inside a sandbox, the NaCl runtime. This
|
|
||||||
runtime must be installed before you can use NaCl programs.
|
|
||||||
|
|
||||||
The NaCl distribution comes with an installer which ensures you have access to
|
|
||||||
the latest version of the runtime. The version tracks the Chrome numbering
|
|
||||||
scheme.
|
|
||||||
|
|
||||||
# Download NaCl
|
|
||||||
|
|
||||||
Download nacl_sdk.zip file from
|
|
||||||
https://developer.chrome.com/native-client/sdk/download
|
|
||||||
and unpack it. I chose /opt/nacl_sdk.
|
|
||||||
|
|
||||||
# Update
|
|
||||||
|
|
||||||
The zip file contains a small skeleton that can be used to download the correct
|
|
||||||
sdk. These are released every 6-8 weeks, in line with Chrome releases.
|
|
||||||
|
|
||||||
% cd /opt/nacl_sdk
|
|
||||||
% ./naclsdk update
|
|
||||||
|
|
||||||
At this time pepper_49 is the stable version. The NaCl port needs at least pepper_39
|
|
||||||
to work. If naclsdk downloads a later version, please adjust accordingly.
|
|
||||||
|
|
||||||
The cmd/go helper scripts expect that the loaders sel_ldr_{x86_{32,64},arm} and
|
|
||||||
nacl_helper_bootstrap_arm are in your path. I find it easiest to make a symlink
|
|
||||||
from the NaCl distribution to my $GOPATH/bin directory.
|
|
||||||
|
|
||||||
% ln -nfs /opt/nacl_sdk/pepper_39/tools/sel_ldr_x86_32 $GOPATH/bin/sel_ldr_x86_32
|
|
||||||
% ln -nfs /opt/nacl_sdk/pepper_39/tools/sel_ldr_x86_64 $GOPATH/bin/sel_ldr_x86_64
|
|
||||||
% ln -nfs /opt/nacl_sdk/pepper_39/tools/sel_ldr_arm $GOPATH/bin/sel_ldr_arm
|
|
||||||
|
|
||||||
Additionally, for NaCl/ARM only:
|
|
||||||
|
|
||||||
% ln -nfs /opt/nacl_sdk/pepper_39/tools/nacl_helper_bootstrap_arm $GOPATH/bin/nacl_helper_bootstrap_arm
|
|
||||||
|
|
||||||
Support scripts
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Symlink the two scripts in this directory into your $PATH, just as you did with
|
|
||||||
NaCl sdk above.
|
|
||||||
|
|
||||||
% ln -nfs $GOROOT/misc/nacl/go_nacl_amd64p32_exec $GOPATH/bin/go_nacl_amd64p32_exec
|
|
||||||
% ln -nfs $GOROOT/misc/nacl/go_nacl_386_exec $GOPATH/bin/go_nacl_386_exec
|
|
||||||
% ln -nfs $GOROOT/misc/nacl/go_nacl_arm_exec $GOPATH/bin/go_nacl_arm_exec
|
|
||||||
|
|
||||||
Building and testing
|
|
||||||
--------------------
|
|
||||||
|
|
||||||
Building for NaCl is similar to cross compiling for other platforms. However,
|
|
||||||
as it is not possible to ever build in a `native` NaCl environment, the cmd/go
|
|
||||||
tool has been enhanced to allow the full build, all.bash, to be executed,
|
|
||||||
rather than just the compile stage, make.bash.
|
|
||||||
|
|
||||||
The cmd/go tool knows that if GOOS is set to `nacl` it should not try to
|
|
||||||
execute any binaries itself. Instead it passes their execution to a support
|
|
||||||
script which sets up a Native Client environment and invokes the NaCl sandbox.
|
|
||||||
|
|
||||||
The script's name has a special format, go_$GOOS_$GOARCH_exec, so cmd/go can
|
|
||||||
find it.
|
|
||||||
|
|
||||||
In short, if the support scripts are in place, the cmd/go tool can be used as
|
|
||||||
per normal.
|
|
||||||
|
|
||||||
# Build and test Go for NaCl
|
|
||||||
|
|
||||||
NaCl does not permit direct file system access. Instead, package syscall
|
|
||||||
provides a simulated file system served by in-memory data. The script
|
|
||||||
nacltest.bash is the NaCl equivalent of all.bash. It builds NaCl with an
|
|
||||||
in-memory file system containing files needed for tests, and then it runs the
|
|
||||||
tests.
|
|
||||||
|
|
||||||
% cd go/src
|
|
||||||
% env GOARCH=amd64p32 ./nacltest.bash
|
|
||||||
|
|
||||||
Debugging
|
|
||||||
---------
|
|
||||||
|
|
||||||
Assuming that you have built nacl/amd64p32 binary ./mybin and can run as:
|
|
||||||
|
|
||||||
% sel_ldr_x86_64 -l /dev/null -S -e ./mybin
|
|
||||||
|
|
||||||
Create the nacl manifest file mybin.manifest with the following contents:
|
|
||||||
|
|
||||||
{ "program": { "x86-64": { "url": "mybin" } } }
|
|
||||||
|
|
||||||
url is the path to the binary relative to the manifest file.
|
|
||||||
Then, run the program as:
|
|
||||||
|
|
||||||
% sel_ldr_x86_64 -g -l /dev/null -S -e ./mybin
|
|
||||||
|
|
||||||
The -g flag instructs the loader to stop at startup. Then, in another console:
|
|
||||||
|
|
||||||
% /opt/nacl_sdk/pepper_39/toolchain/linux_x86_glibc/bin/x86_64-nacl-gdb
|
|
||||||
% nacl-manifest mybin.manifest
|
|
||||||
% target remote :4014
|
|
||||||
|
|
||||||
If you see that the program is stopped in _rt0_amd64p32_nacl, then symbols are
|
|
||||||
loaded successfully and you can type 'c' to start the program.
|
|
||||||
Next time you can automate it as:
|
|
||||||
|
|
||||||
% /opt/nacl_sdk/pepper_39/toolchain/linux_x86_glibc/bin/x86_64-nacl-gdb \
|
|
||||||
-ex 'nacl-manifest mybin.manifest' -ex 'target remote :4014'
|
|
||||||
|
|
@ -1,10 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
eval $(go env)
|
|
||||||
|
|
||||||
export NACLENV_GOARCH=$GOARCH
|
|
||||||
export NACLENV_GOOS=$GOOS
|
|
||||||
export NACLENV_GOROOT=/go
|
|
||||||
export NACLENV_NACLPWD=$(pwd | sed "s;$GOROOT;/go;")
|
|
||||||
|
|
||||||
exec sel_ldr_x86_32 -l /dev/null -S -e "$@"
|
|
||||||
|
|
@ -1,10 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
eval $(go env)
|
|
||||||
|
|
||||||
export NACLENV_GOARCH=$GOARCH
|
|
||||||
export NACLENV_GOOS=$GOOS
|
|
||||||
export NACLENV_GOROOT=/go
|
|
||||||
export NACLENV_NACLPWD=$(pwd | sed "s;$GOROOT;/go;")
|
|
||||||
|
|
||||||
exec sel_ldr_x86_64 -l /dev/null -S -e "$@"
|
|
||||||
|
|
@ -1,10 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
eval $(go env)
|
|
||||||
|
|
||||||
export NACLENV_GOARCH=$GOARCH
|
|
||||||
export NACLENV_GOOS=$GOOS
|
|
||||||
export NACLENV_GOROOT=/go
|
|
||||||
export NACLENV_NACLPWD=$(pwd | sed "s;$GOROOT;/go;")
|
|
||||||
|
|
||||||
exec nacl_helper_bootstrap_arm $(which sel_ldr_arm) --reserved_at_zero=0xXXXXXXXXXXXXXXXX -l /dev/null -S -e "$@"
|
|
||||||
|
|
@ -1,226 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Mkzip creates a zip file from a 'proto' file describing the contents.
|
|
||||||
//
|
|
||||||
// The proto file is inspired by the Plan 9 mkfs prototype file format.
|
|
||||||
// It describes a file tree, one directory per line, with leading tab
|
|
||||||
// indentation marking the tree structure. Each line contains a leading
|
|
||||||
// name field giving the name of the file to copy into the zip file,
|
|
||||||
// and then a sequence of optional key=value attributes to control
|
|
||||||
// the copy. The only known attribute is src=foo, meaning copy the
|
|
||||||
// actual data for the file (or directory) from an alternate location.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"bufio"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func usage() {
|
|
||||||
fmt.Fprintf(os.Stderr, "usage: mkzip [-r root] src.proto out.zip\n")
|
|
||||||
os.Exit(2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func sysfatal(format string, args ...interface{}) {
|
|
||||||
fmt.Fprintf(os.Stderr, "mkzip: %s\n", fmt.Sprintf(format, args...))
|
|
||||||
os.Exit(2)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
root = flag.String("r", ".", "interpret source paths relative to this directory")
|
|
||||||
gopackage = flag.String("p", "", "write Go source file in this package")
|
|
||||||
)
|
|
||||||
|
|
||||||
type stack struct {
|
|
||||||
name string
|
|
||||||
src string
|
|
||||||
depth int
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
log.SetFlags(0)
|
|
||||||
flag.Usage = usage
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
args := flag.Args()
|
|
||||||
if len(args) != 2 {
|
|
||||||
usage()
|
|
||||||
}
|
|
||||||
|
|
||||||
rf, err := os.Open(args[0])
|
|
||||||
if err != nil {
|
|
||||||
sysfatal("%v", err)
|
|
||||||
}
|
|
||||||
r := bufio.NewScanner(rf)
|
|
||||||
|
|
||||||
zf, err := os.Create(args[1])
|
|
||||||
if err != nil {
|
|
||||||
sysfatal("%v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var w io.Writer = zf
|
|
||||||
if *gopackage != "" {
|
|
||||||
fmt.Fprintf(zf, `package %s
|
|
||||||
import "sync"
|
|
||||||
func init() {
|
|
||||||
var once sync.Once
|
|
||||||
fsinit = func() {
|
|
||||||
once.Do(func() {
|
|
||||||
unzip("`, *gopackage)
|
|
||||||
gw := &goWriter{b: bufio.NewWriter(w)}
|
|
||||||
defer func() {
|
|
||||||
if err := gw.Close(); err != nil {
|
|
||||||
sysfatal("finishing Go output: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
w = gw
|
|
||||||
}
|
|
||||||
z := zip.NewWriter(w)
|
|
||||||
|
|
||||||
lineno := 0
|
|
||||||
|
|
||||||
addfile := func(info os.FileInfo, dst string, src string) {
|
|
||||||
zh, err := zip.FileInfoHeader(info)
|
|
||||||
if err != nil {
|
|
||||||
sysfatal("%s:%d: %s: %v", args[0], lineno, src, err)
|
|
||||||
}
|
|
||||||
zh.Name = dst
|
|
||||||
zh.Method = zip.Deflate
|
|
||||||
if info.IsDir() && !strings.HasSuffix(dst, "/") {
|
|
||||||
zh.Name += "/"
|
|
||||||
}
|
|
||||||
w, err := z.CreateHeader(zh)
|
|
||||||
if err != nil {
|
|
||||||
sysfatal("%s:%d: %s: %v", args[0], lineno, src, err)
|
|
||||||
}
|
|
||||||
if info.IsDir() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
r, err := os.Open(src)
|
|
||||||
if err != nil {
|
|
||||||
sysfatal("%s:%d: %s: %v", args[0], lineno, src, err)
|
|
||||||
}
|
|
||||||
defer r.Close()
|
|
||||||
if _, err := io.Copy(w, r); err != nil {
|
|
||||||
sysfatal("%s:%d: %s: %v", args[0], lineno, src, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var stk []stack
|
|
||||||
|
|
||||||
for r.Scan() {
|
|
||||||
line := r.Text()
|
|
||||||
lineno++
|
|
||||||
s := strings.TrimLeft(line, "\t")
|
|
||||||
prefix, line := line[:len(line)-len(s)], s
|
|
||||||
if i := strings.Index(line, "#"); i >= 0 {
|
|
||||||
line = line[:i]
|
|
||||||
}
|
|
||||||
f := strings.Fields(line)
|
|
||||||
if len(f) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(line, " ") {
|
|
||||||
sysfatal("%s:%d: must use tabs for indentation", args[0], lineno)
|
|
||||||
}
|
|
||||||
depth := len(prefix)
|
|
||||||
for len(stk) > 0 && depth <= stk[len(stk)-1].depth {
|
|
||||||
stk = stk[:len(stk)-1]
|
|
||||||
}
|
|
||||||
parent := ""
|
|
||||||
psrc := *root
|
|
||||||
if len(stk) > 0 {
|
|
||||||
parent = stk[len(stk)-1].name
|
|
||||||
psrc = stk[len(stk)-1].src
|
|
||||||
}
|
|
||||||
if strings.Contains(f[0], "/") {
|
|
||||||
sysfatal("%s:%d: destination name cannot contain slash", args[0], lineno)
|
|
||||||
}
|
|
||||||
name := path.Join(parent, f[0])
|
|
||||||
src := filepath.Join(psrc, f[0])
|
|
||||||
for _, attr := range f[1:] {
|
|
||||||
i := strings.Index(attr, "=")
|
|
||||||
if i < 0 {
|
|
||||||
sysfatal("%s:%d: malformed attribute %q", args[0], lineno, attr)
|
|
||||||
}
|
|
||||||
key, val := attr[:i], attr[i+1:]
|
|
||||||
switch key {
|
|
||||||
case "src":
|
|
||||||
src = val
|
|
||||||
default:
|
|
||||||
sysfatal("%s:%d: unknown attribute %q", args[0], lineno, attr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
stk = append(stk, stack{name: name, src: src, depth: depth})
|
|
||||||
|
|
||||||
if f[0] == "*" || f[0] == "+" {
|
|
||||||
if f[0] == "*" {
|
|
||||||
dir, err := ioutil.ReadDir(psrc)
|
|
||||||
if err != nil {
|
|
||||||
sysfatal("%s:%d: %v", args[0], lineno, err)
|
|
||||||
}
|
|
||||||
for _, d := range dir {
|
|
||||||
addfile(d, path.Join(parent, d.Name()), filepath.Join(psrc, d.Name()))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err := filepath.Walk(psrc, func(src string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if src == psrc {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if psrc == "." {
|
|
||||||
psrc = ""
|
|
||||||
}
|
|
||||||
name := path.Join(parent, filepath.ToSlash(src[len(psrc):]))
|
|
||||||
addfile(info, name, src)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
sysfatal("%s:%d: %v", args[0], lineno, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fi, err := os.Stat(src)
|
|
||||||
if err != nil {
|
|
||||||
sysfatal("%s:%d: %v", args[0], lineno, err)
|
|
||||||
}
|
|
||||||
addfile(fi, name, src)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := z.Close(); err != nil {
|
|
||||||
sysfatal("finishing zip file: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type goWriter struct {
|
|
||||||
b *bufio.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *goWriter) Write(b []byte) (int, error) {
|
|
||||||
for _, c := range b {
|
|
||||||
fmt.Fprintf(w.b, "\\x%02x", c)
|
|
||||||
}
|
|
||||||
return len(b), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *goWriter) Close() error {
|
|
||||||
fmt.Fprintf(w.b, "\")\n\t\t})\n\t}\n}")
|
|
||||||
w.b.Flush()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
0
misc/nacl/testdata/bin/placeholder
vendored
0
misc/nacl/testdata/bin/placeholder
vendored
0
misc/nacl/testdata/empty
vendored
0
misc/nacl/testdata/empty
vendored
8
misc/nacl/testdata/group
vendored
8
misc/nacl/testdata/group
vendored
|
|
@ -1,8 +0,0 @@
|
||||||
nobody:*:-2:
|
|
||||||
nogroup:*:-1:
|
|
||||||
wheel:*:0:root
|
|
||||||
daemon:*:1:root
|
|
||||||
kmem:*:2:root
|
|
||||||
sys:*:3:root
|
|
||||||
tty:*:4:root
|
|
||||||
operator:*:5:root
|
|
||||||
1
misc/nacl/testdata/hosts
vendored
1
misc/nacl/testdata/hosts
vendored
|
|
@ -1 +0,0 @@
|
||||||
127.0.0.1 localhost
|
|
||||||
1596
misc/nacl/testdata/mime.types
vendored
1596
misc/nacl/testdata/mime.types
vendored
File diff suppressed because it is too large
Load diff
|
|
@ -1,190 +0,0 @@
|
||||||
etc src=/etc
|
|
||||||
mime.types src=../misc/nacl/testdata/mime.types
|
|
||||||
resolv.conf src=../misc/nacl/testdata/empty
|
|
||||||
group src=../misc/nacl/testdata/group
|
|
||||||
passwd src=../misc/nacl/testdata/empty
|
|
||||||
hosts src=../misc/nacl/testdata/hosts
|
|
||||||
services
|
|
||||||
usr src=../misc/nacl/testdata
|
|
||||||
bin
|
|
||||||
go src=..
|
|
||||||
src
|
|
||||||
cmd
|
|
||||||
api
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
asm
|
|
||||||
internal
|
|
||||||
asm
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
compile
|
|
||||||
internal
|
|
||||||
syntax
|
|
||||||
parser.go
|
|
||||||
cover
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
doc
|
|
||||||
main.go
|
|
||||||
pkg.go
|
|
||||||
doc_test.go
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
internal
|
|
||||||
objfile
|
|
||||||
objfile.go
|
|
||||||
buildid
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
gofmt
|
|
||||||
gofmt.go
|
|
||||||
gofmt_test.go
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
vendor
|
|
||||||
github.com
|
|
||||||
google
|
|
||||||
pprof
|
|
||||||
internal
|
|
||||||
binutils
|
|
||||||
+
|
|
||||||
driver
|
|
||||||
+
|
|
||||||
graph
|
|
||||||
+
|
|
||||||
report
|
|
||||||
+
|
|
||||||
profile
|
|
||||||
+
|
|
||||||
ianlancetaylor
|
|
||||||
demangle
|
|
||||||
+
|
|
||||||
golang.org
|
|
||||||
x
|
|
||||||
arch
|
|
||||||
arm
|
|
||||||
armasm
|
|
||||||
+
|
|
||||||
arm64
|
|
||||||
arm64asm
|
|
||||||
+
|
|
||||||
x86
|
|
||||||
x86asm
|
|
||||||
+
|
|
||||||
ppc64
|
|
||||||
ppc64asm
|
|
||||||
+
|
|
||||||
archive
|
|
||||||
tar
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
zip
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
compress
|
|
||||||
bzip2
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
flate
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
gzip
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
lzw
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
zlib
|
|
||||||
crypto
|
|
||||||
ed25519
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
rsa
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
tls
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
debug
|
|
||||||
dwarf
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
elf
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
macho
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
pe
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
plan9obj
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
go
|
|
||||||
build
|
|
||||||
+
|
|
||||||
doc
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
format
|
|
||||||
+
|
|
||||||
parser
|
|
||||||
+
|
|
||||||
printer
|
|
||||||
+
|
|
||||||
image
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
draw
|
|
||||||
gif
|
|
||||||
jpeg
|
|
||||||
png
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
internal
|
|
||||||
trace
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
xcoff
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
io
|
|
||||||
+
|
|
||||||
mime
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
multipart
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
net
|
|
||||||
http
|
|
||||||
+
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
os
|
|
||||||
+
|
|
||||||
path
|
|
||||||
filepath
|
|
||||||
+
|
|
||||||
regexp
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
runtime
|
|
||||||
textflag.h
|
|
||||||
strconv
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
text
|
|
||||||
template
|
|
||||||
testdata
|
|
||||||
+
|
|
||||||
lib
|
|
||||||
time
|
|
||||||
zoneinfo.zip
|
|
||||||
|
|
||||||
test
|
|
||||||
+
|
|
||||||
|
|
@@ -30,6 +30,12 @@
 		global.fs = require("fs");
 	}

+	const enosys = () => {
+		const err = new Error("not implemented");
+		err.code = "ENOSYS";
+		return err;
+	};
+
 	if (!global.fs) {
 		let outputBuf = "";
 		global.fs = {

@@ -45,27 +51,53 @@
 			},
 			write(fd, buf, offset, length, position, callback) {
 				if (offset !== 0 || length !== buf.length || position !== null) {
-					throw new Error("not implemented");
+					callback(enosys());
+					return;
 				}
 				const n = this.writeSync(fd, buf);
 				callback(null, n);
 			},
-			open(path, flags, mode, callback) {
-				const err = new Error("not implemented");
-				err.code = "ENOSYS";
-				callback(err);
-			},
-			read(fd, buffer, offset, length, position, callback) {
-				const err = new Error("not implemented");
-				err.code = "ENOSYS";
-				callback(err);
-			},
-			fsync(fd, callback) {
-				callback(null);
-			},
+			chmod(path, mode, callback) { callback(enosys()); },
+			chown(path, uid, gid, callback) { callback(enosys()); },
+			close(fd, callback) { callback(enosys()); },
+			fchmod(fd, mode, callback) { callback(enosys()); },
+			fchown(fd, uid, gid, callback) { callback(enosys()); },
+			fstat(fd, callback) { callback(enosys()); },
+			fsync(fd, callback) { callback(null); },
+			ftruncate(fd, length, callback) { callback(enosys()); },
+			lchown(path, uid, gid, callback) { callback(enosys()); },
+			link(path, link, callback) { callback(enosys()); },
+			lstat(path, callback) { callback(enosys()); },
+			mkdir(path, perm, callback) { callback(enosys()); },
+			open(path, flags, mode, callback) { callback(enosys()); },
+			read(fd, buffer, offset, length, position, callback) { callback(enosys()); },
+			readdir(path, callback) { callback(enosys()); },
+			readlink(path, callback) { callback(enosys()); },
+			rename(from, to, callback) { callback(enosys()); },
+			rmdir(path, callback) { callback(enosys()); },
+			stat(path, callback) { callback(enosys()); },
+			symlink(path, link, callback) { callback(enosys()); },
+			truncate(path, length, callback) { callback(enosys()); },
+			unlink(path, callback) { callback(enosys()); },
+			utimes(path, atime, mtime, callback) { callback(enosys()); },
 		};
 	}

+	if (!global.process) {
+		global.process = {
+			getuid() { return -1; },
+			getgid() { return -1; },
+			geteuid() { return -1; },
+			getegid() { return -1; },
+			getgroups() { throw enosys(); },
+			pid: -1,
+			ppid: -1,
+			umask() { throw enosys(); },
+			cwd() { throw enosys(); },
+			chdir() { throw enosys(); },
+		}
+	}
+
 	if (!global.crypto) {
 		const nodeCrypto = require("crypto");
 		global.crypto = {
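On the Go side, these stubs surface as ENOSYS errors from the os and syscall packages rather than as thrown JavaScript exceptions. A minimal sketch of how a js/wasm program might detect that (this example is not part of the patch):

	// +build js,wasm

	package main

	import (
		"errors"
		"fmt"
		"os"
		"syscall"
	)

	func main() {
		// With only the stubbed fs object available, file operations
		// report ENOSYS instead of crashing the JavaScript runtime.
		_, err := os.Open("/etc/hosts")
		if errors.Is(err, syscall.ENOSYS) {
			fmt.Println("no real file system in this environment")
		} else if err != nil {
			fmt.Println("open failed:", err)
		}
	}
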
@@ -432,6 +432,7 @@ func (b *Reader) ReadBytes(delim byte) ([]byte, error) {
 	var frag []byte
 	var full [][]byte
 	var err error
+	n := 0
 	for {
 		var e error
 		frag, e = b.ReadSlice(delim)

@@ -447,18 +448,15 @@ func (b *Reader) ReadBytes(delim byte) ([]byte, error) {
 		buf := make([]byte, len(frag))
 		copy(buf, frag)
 		full = append(full, buf)
+		n += len(buf)
 	}

-	// Allocate new buffer to hold the full pieces and the fragment.
-	n := 0
-	for i := range full {
-		n += len(full[i])
-	}
 	n += len(frag)

-	// Copy full pieces and fragment in.
+	// Allocate new buffer to hold the full pieces and the fragment.
 	buf := make([]byte, n)
 	n = 0
+	// Copy full pieces and fragment in.
 	for i := range full {
 		n += copy(buf[n:], full[i])
 	}
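The refactor above folds the length computation into the collection loop instead of re-counting the pieces afterwards. A small standalone sketch of the same pattern (not code from the patch; the input and names are invented for illustration):

	package main

	import (
		"bufio"
		"bytes"
		"fmt"
	)

	func main() {
		r := bufio.NewReader(bytes.NewReader([]byte("a,b,c,")))

		var full [][]byte
		n := 0 // running total of bytes collected, maintained while appending
		for {
			frag, err := r.ReadSlice(',')
			// ReadSlice's result is only valid until the next read, so copy it.
			piece := make([]byte, len(frag))
			copy(piece, frag)
			full = append(full, piece)
			n += len(piece)
			if err != nil { // io.EOF once the input is exhausted
				break
			}
		}

		buf := make([]byte, 0, n) // single allocation sized from the running total
		for _, p := range full {
			buf = append(buf, p...)
		}
		fmt.Printf("%q\n", buf) // "a,b,c,"
	}
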
@@ -45,17 +45,17 @@ selectedtargets() {
 	gettargets | egrep -v 'android-arm|darwin-arm' | egrep "$pattern"
 }

-# put linux, nacl first in the target list to get all the architectures up front.
-linux_nacl_targets() {
-	selectedtargets | egrep 'linux|nacl' | sort
-}
-
-non_linux_nacl_targets() {
-	selectedtargets | egrep -v 'linux|nacl' | sort
-}
+# put linux first in the target list to get all the architectures up front.
+linux_targets() {
+	selectedtargets | grep 'linux' | sort
+}
+
+non_linux_targets() {
+	selectedtargets | grep -v 'linux' | sort
+}

 # Note words in $targets are separated by both newlines and spaces.
-targets="$(linux_nacl_targets) $(non_linux_nacl_targets)"
+targets="$(linux_targets) $(non_linux_targets)"

 failed=false
 for target in $targets
@@ -120,6 +120,39 @@ func TestCompareBytes(t *testing.T) {
 	}
 }

+func TestEndianBaseCompare(t *testing.T) {
+	// This test compares byte slices that are almost identical, except one
+	// difference that for some j, a[j]>b[j] and a[j+1]<b[j+1]. If the implementation
+	// compares large chunks with wrong endianness, it gets wrong result.
+	// no vector register is larger than 512 bytes for now
+	const maxLength = 512
+	a := make([]byte, maxLength)
+	b := make([]byte, maxLength)
+	// randomish but deterministic data. No 0 or 255.
+	for i := 0; i < maxLength; i++ {
+		a[i] = byte(1 + 31*i%254)
+		b[i] = byte(1 + 31*i%254)
+	}
+	for i := 2; i <= maxLength; i <<= 1 {
+		for j := 0; j < i-1; j++ {
+			a[j] = b[j] - 1
+			a[j+1] = b[j+1] + 1
+			cmp := Compare(a[:i], b[:i])
+			if cmp != -1 {
+				t.Errorf(`CompareBbigger(%d,%d) = %d`, i, j, cmp)
+			}
+			a[j] = b[j] + 1
+			a[j+1] = b[j+1] - 1
+			cmp = Compare(a[:i], b[:i])
+			if cmp != 1 {
+				t.Errorf(`CompareAbigger(%d,%d) = %d`, i, j, cmp)
+			}
+			a[j] = b[j]
+			a[j+1] = b[j+1]
+		}
+	}
+}
+
 func BenchmarkCompareBytesEqual(b *testing.B) {
 	b1 := []byte("Hello Gophers!")
 	b2 := []byte("Hello Gophers!")
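For reference, the property the test exercises: Compare is lexicographic, so the first differing byte decides the result regardless of later bytes. A tiny sketch (not part of the patch):

	package main

	import (
		"bytes"
		"fmt"
	)

	func main() {
		a := []byte{1, 9}
		b := []byte{2, 0}
		// a[0] < b[0] decides the comparison even though a[1] > b[1]; an
		// implementation comparing multi-byte chunks with the wrong
		// endianness would get this backwards.
		fmt.Println(bytes.Compare(a, b)) // -1
	}
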
|
||||||
|
|
@ -56,8 +56,6 @@ func Set(GOARCH string) *Arch {
|
||||||
return archX86(&x86.Link386)
|
return archX86(&x86.Link386)
|
||||||
case "amd64":
|
case "amd64":
|
||||||
return archX86(&x86.Linkamd64)
|
return archX86(&x86.Linkamd64)
|
||||||
case "amd64p32":
|
|
||||||
return archX86(&x86.Linkamd64p32)
|
|
||||||
case "arm":
|
case "arm":
|
||||||
return archArm()
|
return archArm()
|
||||||
case "arm64":
|
case "arm64":
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,8 @@ func jumpS390x(word string) bool {
|
||||||
"BR",
|
"BR",
|
||||||
"BVC",
|
"BVC",
|
||||||
"BVS",
|
"BVS",
|
||||||
|
"BRCT",
|
||||||
|
"BRCTG",
|
||||||
"CMPBEQ",
|
"CMPBEQ",
|
||||||
"CMPBGE",
|
"CMPBGE",
|
||||||
"CMPBGT",
|
"CMPBGT",
|
||||||
|
|
|
||||||
src/cmd/asm/internal/asm/testdata/arm64.s (2 lines changed, vendored)

@@ -1221,7 +1221,7 @@ again:
 	MRS DBGCLAIMSET_EL1, R7 // c77830d5
 	MSR R13, DBGCLAIMSET_EL1 // cd7810d5
 	MRS DBGDTRRX_EL0, R0 // 000533d5
-	MSR R29, DBGDTRRX_EL0 // 1d0513d5
+	MSR R29, DBGDTRTX_EL0 // 1d0513d5
 	MRS DBGDTR_EL0, R27 // 1b0433d5
 	MSR R30, DBGDTR_EL0 // 1e0413d5
 	MRS DBGPRCR_EL1, R4 // 841430d5
|
|
|
||||||
104
src/cmd/asm/internal/asm/testdata/arm64error.s
vendored
104
src/cmd/asm/internal/asm/testdata/arm64error.s
vendored
|
|
@ -232,4 +232,108 @@ TEXT errors(SB),$0
|
||||||
STXPW (R5, R7), (R6), RSP // ERROR "illegal destination register"
|
STXPW (R5, R7), (R6), RSP // ERROR "illegal destination register"
|
||||||
STLXP (R5, R7), (R6), RSP // ERROR "illegal destination register"
|
STLXP (R5, R7), (R6), RSP // ERROR "illegal destination register"
|
||||||
STLXP (R5, R7), (R6), RSP // ERROR "illegal destination register"
|
STLXP (R5, R7), (R6), RSP // ERROR "illegal destination register"
|
||||||
|
MSR OSLAR_EL1, R5 // ERROR "illegal combination"
|
||||||
|
MRS R11, AIDR_EL1 // ERROR "illegal combination"
|
||||||
|
MSR R6, AIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMCFGR_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMCGCR_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER00_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER01_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER02_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER03_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER04_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER05_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER06_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER07_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER08_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER09_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER010_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER011_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER012_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER013_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER014_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, AMEVTYPER015_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, CCSIDR2_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, CCSIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, CLIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, CNTPCT_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, CNTVCT_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, CTR_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, CurrentEL // ERROR "system register is not writable"
|
||||||
|
MSR R6, DBGAUTHSTATUS_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, DBGDTRRX_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, DCZID_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ERRIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ERXFR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ERXPFGF_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, GMID_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICC_HPPIR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICC_HPPIR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICC_IAR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICC_IAR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICC_RPR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICV_HPPIR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICV_HPPIR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICV_IAR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICV_IAR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ICV_RPR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64AFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64AFR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64DFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64DFR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64ISAR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64ISAR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64MMFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64MMFR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64MMFR2_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64PFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64PFR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AA64ZFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_AFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_DFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_ISAR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_ISAR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_ISAR2_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_ISAR3_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_ISAR4_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_ISAR5_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_ISAR6_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_MMFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_MMFR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_MMFR2_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_MMFR3_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_MMFR4_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_PFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_PFR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ID_PFR2_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, ISR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, LORID_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, MDCCSR_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, MDRAR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, MIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, MPAMIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, MPIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, MVFR0_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, MVFR1_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, MVFR2_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, OSLSR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, PMBIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, PMCEID0_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, PMCEID1_EL0 // ERROR "system register is not writable"
|
||||||
|
MSR R6, PMMIR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, PMSIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, REVIDR_EL1 // ERROR "system register is not writable"
|
||||||
|
MSR R6, RNDR // ERROR "system register is not writable"
|
||||||
|
MRS DBGDTRTX_EL0, R5 // ERROR "system register is not readable"
|
||||||
|
MRS ICV_DIR_EL1, R5 // ERROR "system register is not readable"
|
||||||
|
MRS ICC_SGI1R_EL1, R5 // ERROR "system register is not readable"
|
||||||
|
MRS ICC_SGI0R_EL1, R5 // ERROR "system register is not readable"
|
||||||
|
MRS ICC_EOIR1_EL1, R5 // ERROR "system register is not readable"
|
||||||
|
MRS ICC_EOIR0_EL1, R5 // ERROR "system register is not readable"
|
||||||
|
MRS ICC_DIR_EL1, R5 // ERROR "system register is not readable"
|
||||||
|
MRS ICC_ASGI1R_EL1, R5 // ERROR "system register is not readable"
|
||||||
|
MRS ICV_EOIR0_EL1, R3 // ERROR "system register is not readable"
|
||||||
|
MRS ICV_EOIR1_EL1, R3 // ERROR "system register is not readable"
|
||||||
|
MRS PMSWINC_EL0, R3 // ERROR "system register is not readable"
|
||||||
|
MRS OSLAR_EL1, R3 // ERROR "system register is not readable"
|
||||||
RET
|
RET
|
||||||
|
|
|
||||||
src/cmd/asm/internal/asm/testdata/s390x.s (9 lines changed, vendored)

@@ -266,6 +266,9 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16-
 	BLTU 0(PC) // a7540000
 	BLEU 0(PC) // a7d40000

+	BRCT R1, 0(PC) // a7160000
+	BRCTG R2, 0(PC) // a7270000
+
 	CMPBNE R1, R2, 0(PC) // ec1200007064
 	CMPBEQ R3, R4, 0(PC) // ec3400008064
 	CMPBLT R5, R6, 0(PC) // ec5600004064

@@ -435,6 +438,12 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16-
 	VMSLEG V21, V22, V23, V24 // e78563807fb8
 	VMSLOG V21, V22, V23, V24 // e78563407fb8
 	VMSLEOG V21, V22, V23, V24 // e78563c07fb8
+	VSUMGH V1, V2, V3 // e73120001065
+	VSUMGF V16, V17, V18 // e72010002e65
+	VSUMQF V4, V5, V6 // e76450002067
+	VSUMQG V19, V20, V21 // e75340003e67
+	VSUMB V7, V8, V9 // e79780000064
+	VSUMH V22, V23, V24 // e78670001e64

 	RET
 	RET foo(SB)
@ -2189,6 +2189,11 @@ func (c *typeConv) FinishType(pos token.Pos) {
|
||||||
// Type returns a *Type with the same memory layout as
|
// Type returns a *Type with the same memory layout as
|
||||||
// dtype when used as the type of a variable or a struct field.
|
// dtype when used as the type of a variable or a struct field.
|
||||||
func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
|
func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
|
||||||
|
return c.loadType(dtype, pos, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadType recursively loads the requested dtype and its dependency graph.
|
||||||
|
func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Type {
|
||||||
// Always recompute bad pointer typedefs, as the set of such
|
// Always recompute bad pointer typedefs, as the set of such
|
||||||
// typedefs changes as we see more types.
|
// typedefs changes as we see more types.
|
||||||
checkCache := true
|
checkCache := true
|
||||||
|
|
@ -2196,7 +2201,9 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
|
||||||
checkCache = false
|
checkCache = false
|
||||||
}
|
}
|
||||||
|
|
||||||
key := dtype.String()
|
// The cache key should be relative to its parent.
|
||||||
|
// See issue https://golang.org/issue/31891
|
||||||
|
key := parent + " > " + dtype.String()
|
||||||
|
|
||||||
if checkCache {
|
if checkCache {
|
||||||
if t, ok := c.m[key]; ok {
|
if t, ok := c.m[key]; ok {
|
||||||
|
|
@ -2236,7 +2243,7 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
|
||||||
// Translate to zero-length array instead.
|
// Translate to zero-length array instead.
|
||||||
count = 0
|
count = 0
|
||||||
}
|
}
|
||||||
sub := c.Type(dt.Type, pos)
|
sub := c.loadType(dt.Type, pos, key)
|
||||||
t.Align = sub.Align
|
t.Align = sub.Align
|
||||||
t.Go = &ast.ArrayType{
|
t.Go = &ast.ArrayType{
|
||||||
Len: c.intExpr(count),
|
Len: c.intExpr(count),
|
||||||
|
|
@ -2381,7 +2388,7 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
|
||||||
c.ptrs[key] = append(c.ptrs[key], t)
|
c.ptrs[key] = append(c.ptrs[key], t)
|
||||||
|
|
||||||
case *dwarf.QualType:
|
case *dwarf.QualType:
|
||||||
t1 := c.Type(dt.Type, pos)
|
t1 := c.loadType(dt.Type, pos, key)
|
||||||
t.Size = t1.Size
|
t.Size = t1.Size
|
||||||
t.Align = t1.Align
|
t.Align = t1.Align
|
||||||
t.Go = t1.Go
|
t.Go = t1.Go
|
||||||
|
|
@ -2465,7 +2472,7 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
|
||||||
}
|
}
|
||||||
name := c.Ident("_Ctype_" + dt.Name)
|
name := c.Ident("_Ctype_" + dt.Name)
|
||||||
goIdent[name.Name] = name
|
goIdent[name.Name] = name
|
||||||
sub := c.Type(dt.Type, pos)
|
sub := c.loadType(dt.Type, pos, key)
|
||||||
if c.badPointerTypedef(dt) {
|
if c.badPointerTypedef(dt) {
|
||||||
// Treat this typedef as a uintptr.
|
// Treat this typedef as a uintptr.
|
||||||
s := *sub
|
s := *sub
|
||||||
|
|
|
||||||
|
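The idea behind the parent-relative cache key: keying the memoization cache on the type's own description alone collapses two structurally identical anonymous structs reached through different typedefs into one cached Go type. A minimal sketch of that caching idea, not cgo's real implementation (the helper names are invented):

	package main

	import "fmt"

	type typeCache struct {
		m map[string]string
	}

	func (c *typeCache) load(desc, parent string) string {
		key := parent + " > " + desc // parent-relative key, as in the patch
		if t, ok := c.m[key]; ok {
			return t
		}
		t := fmt.Sprintf("_Ctype_struct___%d", len(c.m)) // fresh placeholder name
		c.m[key] = t
		return t
	}

	func main() {
		c := &typeCache{m: map[string]string{}}
		// Identical anonymous struct bodies behind two different typedefs
		// (the Issue31891A / Issue31891B shape from the test) now get
		// distinct names instead of sharing one cache entry.
		fmt.Println(c.load("struct { long obj; }", "Issue31891A")) // _Ctype_struct___0
		fmt.Println(c.load("struct { long obj; }", "Issue31891B")) // _Ctype_struct___1
	}
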
|
@@ -160,6 +160,7 @@ var knownFormats = map[string]string{
 	"int64 %v": "",
 	"int64 %x": "",
 	"int8 %d": "",
+	"int8 %v": "",
 	"int8 %x": "",
 	"interface{} %#v": "",
 	"interface{} %T": "",

@@ -195,6 +196,7 @@ var knownFormats = map[string]string{
 	"uint32 %v": "",
 	"uint32 %x": "",
 	"uint64 %08x": "",
+	"uint64 %b": "",
 	"uint64 %d": "",
 	"uint64 %x": "",
 	"uint8 %d": "",
|
@@ -7,17 +7,12 @@ package amd64
 import (
 	"cmd/compile/internal/gc"
 	"cmd/internal/obj/x86"
-	"cmd/internal/objabi"
 )

 var leaptr = x86.ALEAQ

 func Init(arch *gc.Arch) {
 	arch.LinkArch = &x86.Linkamd64
-	if objabi.GOARCH == "amd64p32" {
-		arch.LinkArch = &x86.Linkamd64p32
-		leaptr = x86.ALEAL
-	}
 	arch.REGSP = x86.REGSP
 	arch.MAXWIDTH = 1 << 50
|
@@ -94,7 +94,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 		if cnt%16 != 0 {
 			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
 		}
-	} else if !gc.Nacl && !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
+	} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
 		if *state&x0 == 0 {
 			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
|
@@ -23,7 +23,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
 		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
 			p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
 		}
-	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
+	} else if cnt <= int64(128*gc.Widthptr) {
 		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
 		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
|
@@ -7,8 +7,6 @@ package gc
 import (
 	"cmd/compile/internal/types"
 	"fmt"
-	"strconv"
-	"strings"
 )

 func escapes(all []*Node) {
|
||||||
|
|
@ -36,32 +34,11 @@ func max8(a, b int8) int8 {
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
// Escape constants are numbered in order of increasing "escapiness"
|
|
||||||
// to help make inferences be monotonic. With the exception of
|
|
||||||
// EscNever which is sticky, eX < eY means that eY is more exposed
|
|
||||||
// than eX, and hence replaces it in a conservative analysis.
|
|
||||||
const (
|
const (
|
||||||
EscUnknown = iota
|
EscUnknown = iota
|
||||||
EscNone // Does not escape to heap, result, or parameters.
|
EscNone // Does not escape to heap, result, or parameters.
|
||||||
EscReturn // Is returned or reachable from returned.
|
EscHeap // Reachable from the heap
|
||||||
EscHeap // Reachable from the heap
|
EscNever // By construction will not escape.
|
||||||
EscNever // By construction will not escape.
|
|
||||||
EscBits = 3
|
|
||||||
EscMask = (1 << EscBits) - 1
|
|
||||||
EscContentEscapes = 1 << EscBits // value obtained by indirect of parameter escapes to heap
|
|
||||||
EscReturnBits = EscBits + 1
|
|
||||||
// Node.esc encoding = | escapeReturnEncoding:(width-4) | contentEscapes:1 | escEnum:3
|
|
||||||
)
|
|
||||||
|
|
||||||
// For each input parameter to a function, the escapeReturnEncoding describes
|
|
||||||
// how the parameter may leak to the function's outputs. This is currently the
|
|
||||||
// "level" of the leak where level is 0 or larger (negative level means stored into
|
|
||||||
// something whose address is returned -- but that implies stored into the heap,
|
|
||||||
// hence EscHeap, which means that the details are not currently relevant. )
|
|
||||||
const (
|
|
||||||
bitsPerOutputInTag = 3 // For each output, the number of bits for a tag
|
|
||||||
bitsMaskForTag = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
|
|
||||||
maxEncodedLevel = int(bitsMaskForTag - 1) // The largest level that can be stored in a tag.
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
|
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
|
||||||
|
|
@ -200,49 +177,6 @@ func mustHeapAlloc(n *Node) bool {
|
||||||
n.Op == OMAKESLICE && !isSmallMakeSlice(n))
|
n.Op == OMAKESLICE && !isSmallMakeSlice(n))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Common case for escapes is 16 bits 000000000xxxEEEE
|
|
||||||
// where commonest cases for xxx encoding in-to-out pointer
|
|
||||||
// flow are 000, 001, 010, 011 and EEEE is computed Esc bits.
|
|
||||||
// Note width of xxx depends on value of constant
|
|
||||||
// bitsPerOutputInTag -- expect 2 or 3, so in practice the
|
|
||||||
// tag cache array is 64 or 128 long. Some entries will
|
|
||||||
// never be populated.
|
|
||||||
var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string
|
|
||||||
|
|
||||||
// mktag returns the string representation for an escape analysis tag.
|
|
||||||
func mktag(mask int) string {
|
|
||||||
switch mask & EscMask {
|
|
||||||
case EscHeap:
|
|
||||||
return ""
|
|
||||||
case EscNone, EscReturn:
|
|
||||||
default:
|
|
||||||
Fatalf("escape mktag")
|
|
||||||
}
|
|
||||||
|
|
||||||
if mask < len(tags) && tags[mask] != "" {
|
|
||||||
return tags[mask]
|
|
||||||
}
|
|
||||||
|
|
||||||
s := fmt.Sprintf("esc:0x%x", mask)
|
|
||||||
if mask < len(tags) {
|
|
||||||
tags[mask] = s
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// parsetag decodes an escape analysis tag and returns the esc value.
|
|
||||||
func parsetag(note string) uint16 {
|
|
||||||
if !strings.HasPrefix(note, "esc:") {
|
|
||||||
return EscUnknown
|
|
||||||
}
|
|
||||||
n, _ := strconv.ParseInt(note[4:], 0, 0)
|
|
||||||
em := uint16(n)
|
|
||||||
if em == 0 {
|
|
||||||
return EscNone
|
|
||||||
}
|
|
||||||
return em
|
|
||||||
}
|
|
||||||
|
|
||||||
// addrescapes tags node n as having had its address taken
|
// addrescapes tags node n as having had its address taken
|
||||||
// by "increasing" the "value" of n.Esc to EscHeap.
|
// by "increasing" the "value" of n.Esc to EscHeap.
|
||||||
// Storage is allocated as necessary to allow the address
|
// Storage is allocated as necessary to allow the address
|
||||||
|
|
@ -431,19 +365,22 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var esc EscLeaks
|
||||||
|
|
||||||
// External functions are assumed unsafe, unless
|
// External functions are assumed unsafe, unless
|
||||||
// //go:noescape is given before the declaration.
|
// //go:noescape is given before the declaration.
|
||||||
if fn.Noescape() {
|
if fn.Func.Pragma&Noescape != 0 {
|
||||||
if Debug['m'] != 0 && f.Sym != nil {
|
if Debug['m'] != 0 && f.Sym != nil {
|
||||||
Warnl(f.Pos, "%v does not escape", name())
|
Warnl(f.Pos, "%v does not escape", name())
|
||||||
}
|
}
|
||||||
return mktag(EscNone)
|
} else {
|
||||||
|
if Debug['m'] != 0 && f.Sym != nil {
|
||||||
|
Warnl(f.Pos, "leaking param: %v", name())
|
||||||
|
}
|
||||||
|
esc.AddHeap(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
if Debug['m'] != 0 && f.Sym != nil {
|
return esc.Encode()
|
||||||
Warnl(f.Pos, "leaking param: %v", name())
|
|
||||||
}
|
|
||||||
return mktag(EscHeap)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if fn.Func.Pragma&UintptrEscapes != 0 {
|
if fn.Func.Pragma&UintptrEscapes != 0 {
|
||||||
|
|
@@ -468,30 +405,34 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
 
 	// Unnamed parameters are unused and therefore do not escape.
 	if f.Sym == nil || f.Sym.IsBlank() {
-		return mktag(EscNone)
+		var esc EscLeaks
+		return esc.Encode()
 	}
 
 	n := asNode(f.Nname)
 	loc := e.oldLoc(n)
-	esc := finalizeEsc(loc.paramEsc)
+	esc := loc.paramEsc
+	esc.Optimize()
 
 	if Debug['m'] != 0 && !loc.escapes {
-		if esc == EscNone {
+		if esc.Empty() {
 			Warnl(f.Pos, "%v does not escape", name())
-		} else if esc == EscHeap {
-			Warnl(f.Pos, "leaking param: %v", name())
-		} else {
-			if esc&EscContentEscapes != 0 {
+		}
+		if x := esc.Heap(); x >= 0 {
+			if x == 0 {
+				Warnl(f.Pos, "leaking param: %v", name())
+			} else {
+				// TODO(mdempsky): Mention level=x like below?
 				Warnl(f.Pos, "leaking param content: %v", name())
 			}
-			for i := 0; i < numEscReturns; i++ {
-				if x := getEscReturn(esc, i); x >= 0 {
-					res := fn.Type.Results().Field(i).Sym
-					Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
-				}
+		}
+		for i := 0; i < numEscResults; i++ {
+			if x := esc.Result(i); x >= 0 {
+				res := fn.Type.Results().Field(i).Sym
+				Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
 			}
 		}
 	}
 
-	return mktag(int(esc))
+	return esc.Encode()
 }
@@ -7,6 +7,8 @@ package gc
 import (
 	"cmd/compile/internal/types"
 	"fmt"
+	"math"
+	"strings"
 )
 
 // Escape analysis.
@@ -119,9 +121,8 @@ type EscLocation struct {
 	// its storage can be immediately reused.
 	transient bool
 
-	// paramEsc records the represented parameter's escape tags.
-	// See "Parameter tags" below for details.
-	paramEsc uint16
+	// paramEsc records the represented parameter's leak set.
+	paramEsc EscLeaks
 }
 
 // An EscEdge represents an assignment edge between two Go variables.
@@ -170,11 +171,7 @@ func (e *Escape) initFunc(fn *Node) {
 	// Allocate locations for local variables.
 	for _, dcl := range fn.Func.Dcl {
 		if dcl.Op == ONAME {
-			loc := e.newLoc(dcl, false)
-
-			if dcl.Class() == PPARAM && fn.Nbody.Len() == 0 && !fn.Noescape() {
-				loc.paramEsc = EscHeap
-			}
+			e.newLoc(dcl, false)
 		}
 	}
 }
@@ -892,20 +889,16 @@ func (e *Escape) tagHole(ks []EscHole, param *types.Field, static bool) EscHole
 		return e.heapHole()
 	}
 
-	esc := parsetag(param.Note)
-	switch esc {
-	case EscHeap, EscUnknown:
-		return e.heapHole()
-	}
-
 	var tagKs []EscHole
-	if esc&EscContentEscapes != 0 {
-		tagKs = append(tagKs, e.heapHole().shift(1))
+
+	esc := ParseLeaks(param.Note)
+	if x := esc.Heap(); x >= 0 {
+		tagKs = append(tagKs, e.heapHole().shift(x))
 	}
 
 	if ks != nil {
-		for i := 0; i < numEscReturns; i++ {
-			if x := getEscReturn(esc, i); x >= 0 {
+		for i := 0; i < numEscResults; i++ {
+			if x := esc.Result(i); x >= 0 {
 				tagKs = append(tagKs, ks[i].shift(x))
 			}
 		}
@@ -1247,31 +1240,20 @@ func containsClosure(f, c *Node) bool {
 
 // leak records that parameter l leaks to sink.
 func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
-	// Short circuit if l already leaks to heap.
-	if l.paramEsc == EscHeap {
-		return
-	}
-
 	// If sink is a result parameter and we can fit return bits
 	// into the escape analysis tag, then record a return leak.
 	if sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
 		// TODO(mdempsky): Eliminate dependency on Vargen here.
 		ri := int(sink.n.Name.Vargen) - 1
-		if ri < numEscReturns {
+		if ri < numEscResults {
 			// Leak to result parameter.
-			if old := getEscReturn(l.paramEsc, ri); old < 0 || derefs < old {
-				l.paramEsc = setEscReturn(l.paramEsc, ri, derefs)
-			}
+			l.paramEsc.AddResult(ri, derefs)
 			return
 		}
 	}
 
 	// Otherwise, record as heap leak.
-	if derefs > 0 {
-		l.paramEsc |= EscContentEscapes
-	} else {
-		l.paramEsc = EscHeap
-	}
+	l.paramEsc.AddHeap(derefs)
 }
 
 func (e *Escape) finish(fns []*Node) {
@@ -1311,7 +1293,7 @@ func (e *Escape) finish(fns []*Node) {
 			}
 			n.Esc = EscNone
 			if loc.transient {
-				n.SetNoescape(true)
+				n.SetTransient(true)
 			}
 		}
 	}
@@ -1321,73 +1303,97 @@ func (l *EscLocation) isName(c Class) bool {
 	return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
 }
 
-func finalizeEsc(esc uint16) uint16 {
-	esc = optimizeReturns(esc)
-
-	if esc>>EscReturnBits != 0 {
-		esc |= EscReturn
-	} else if esc&EscMask == 0 {
-		esc |= EscNone
-	}
-
-	return esc
-}
-
-func optimizeReturns(esc uint16) uint16 {
-	if esc&EscContentEscapes != 0 {
-		// EscContentEscapes represents a path of length 1
-		// from the heap. No point in keeping paths of equal
-		// or longer length to result parameters.
-		for i := 0; i < numEscReturns; i++ {
-			if x := getEscReturn(esc, i); x >= 1 {
-				esc = setEscReturn(esc, i, -1)
-			}
-		}
-	}
-
-	return esc
-}
-
-// Parameter tags.
-//
-// The escape bits saved for each analyzed parameter record the
-// minimal derefs (if any) from that parameter to the heap, or to any
-// of its function's (first numEscReturns) result parameters.
-//
-// Paths to the heap are encoded via EscHeap (length 0) or
-// EscContentEscapes (length 1); if neither of these are set, then
-// there's no path to the heap.
-//
-// Paths to the result parameters are encoded in the upper
-// bits.
-//
-// There are other values stored in the escape bits by esc.go for
-// vestigial reasons, and other special tag values used (e.g.,
-// uintptrEscapesTag and unsafeUintptrTag). These could be simplified
-// once compatibility with esc.go is no longer a concern.
-
-const numEscReturns = (16 - EscReturnBits) / bitsPerOutputInTag
-
-func getEscReturn(esc uint16, i int) int {
-	return int((esc>>escReturnShift(i))&bitsMaskForTag) - 1
-}
-
-func setEscReturn(esc uint16, i, v int) uint16 {
-	if v < -1 {
-		Fatalf("invalid esc return value: %v", v)
-	}
-	if v > maxEncodedLevel {
-		v = maxEncodedLevel
-	}
-
-	shift := escReturnShift(i)
-	esc &^= bitsMaskForTag << shift
-	esc |= uint16(v+1) << shift
-	return esc
-}
-
-func escReturnShift(i int) uint {
-	if uint(i) >= numEscReturns {
-		Fatalf("esc return index out of bounds: %v", i)
-	}
-	return uint(EscReturnBits + i*bitsPerOutputInTag)
+const numEscResults = 7
+
+// An EscLeaks represents a set of assignment flows from a parameter
+// to the heap or to any of its function's (first numEscResults)
+// result parameters.
+type EscLeaks [1 + numEscResults]uint8
+
+// Empty reports whether l is an empty set (i.e., no assignment flows).
+func (l EscLeaks) Empty() bool { return l == EscLeaks{} }
+
+// Heap returns the minimum deref count of any assignment flow from l
+// to the heap. If no such flows exist, Heap returns -1.
+func (l EscLeaks) Heap() int { return l.get(0) }
+
+// Result returns the minimum deref count of any assignment flow from
+// l to its function's i'th result parameter. If no such flows exist,
+// Result returns -1.
+func (l EscLeaks) Result(i int) int { return l.get(1 + i) }
+
+// AddHeap adds an assignment flow from l to the heap.
+func (l *EscLeaks) AddHeap(derefs int) { l.add(0, derefs) }
+
+// AddResult adds an assignment flow from l to its function's i'th
+// result parameter.
+func (l *EscLeaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
+
+func (l *EscLeaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+
+func (l EscLeaks) get(i int) int { return int(l[i]) - 1 }
+
+func (l *EscLeaks) add(i, derefs int) {
+	if old := l.get(i); old < 0 || derefs < old {
+		l.set(i, derefs)
+	}
+}
+
+func (l *EscLeaks) set(i, derefs int) {
+	v := derefs + 1
+	if v < 0 {
+		Fatalf("invalid derefs count: %v", derefs)
+	}
+	if v > math.MaxUint8 {
+		v = math.MaxUint8
+	}
+
+	l[i] = uint8(v)
+}
+
+// Optimize removes result flow paths that are equal in length or
+// longer than the shortest heap flow path.
+func (l *EscLeaks) Optimize() {
+	// If we have a path to the heap, then there's no use in
+	// keeping equal or longer paths elsewhere.
+	if x := l.Heap(); x >= 0 {
+		for i := 0; i < numEscResults; i++ {
+			if l.Result(i) >= x {
+				l.setResult(i, -1)
+			}
+		}
+	}
+}
+
+var leakTagCache = map[EscLeaks]string{}
+
+// Encode converts l into a binary string for export data.
+func (l EscLeaks) Encode() string {
+	if l.Heap() == 0 {
+		// Space optimization: empty string encodes more
+		// efficiently in export data.
+		return ""
+	}
+	if s, ok := leakTagCache[l]; ok {
+		return s
+	}
+
+	n := len(l)
+	for n > 0 && l[n-1] == 0 {
+		n--
+	}
+	s := "esc:" + string(l[:n])
+	leakTagCache[l] = s
+	return s
+}
+
+// ParseLeaks parses a binary string representing an EscLeaks.
+func ParseLeaks(s string) EscLeaks {
+	var l EscLeaks
+	if !strings.HasPrefix(s, "esc:") {
+		l.AddHeap(0)
+		return l
+	}
+	copy(l[:], s[4:])
+	return l
 }
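The leak-set encoding added above is small enough to exercise outside the compiler. The following standalone sketch uses simplified names (leaks, add, encode, parse are illustrative stand-ins, not the compiler's API) to show how a deref count is stored as value+1 per slot, trimmed of trailing zeros, and round-tripped through the "esc:" tag string.

package main

import (
	"fmt"
	"strings"
)

// leaks mirrors the shape of the diff's EscLeaks: slot 0 tracks the
// shortest deref count of a flow to the heap, slots 1..7 track flows
// to result parameters. A stored byte of 0 means "no flow"; otherwise
// the byte holds derefs+1.
type leaks [8]uint8

func (l *leaks) add(i, derefs int) {
	if v := derefs + 1; l[i] == 0 || v < int(l[i]) {
		l[i] = uint8(v)
	}
}

func (l leaks) encode() string {
	if l[0] == 1 { // a direct heap flow: the empty tag is the compact form
		return ""
	}
	n := len(l)
	for n > 0 && l[n-1] == 0 {
		n--
	}
	return "esc:" + string(l[:n])
}

func parse(s string) leaks {
	var l leaks
	if !strings.HasPrefix(s, "esc:") {
		l.add(0, 0) // unknown tags are treated as "leaks to heap"
		return l
	}
	copy(l[:], s[4:])
	return l
}

func main() {
	var l leaks
	l.add(1, 2) // parameter flows to result 0 through two derefs
	tag := l.encode()
	fmt.Printf("%q round-trips to %v\n", tag, parse(tag))
}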
@@ -6,6 +6,7 @@ package gc
 
 import (
 	"cmd/compile/internal/types"
+	"cmd/internal/src"
 	"fmt"
 	"io"
 	"strconv"
@@ -425,7 +426,14 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
 	}
 
 	if n.Pos.IsKnown() {
-		fmt.Fprintf(s, " l(%d)", n.Pos.Line())
+		pfx := ""
+		switch n.Pos.IsStmt() {
+		case src.PosNotStmt:
+			pfx = "_" // "-" would be confusing
+		case src.PosIsStmt:
+			pfx = "+"
+		}
+		fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line())
 	}
 
 	if c == 0 && n.Xoffset != BADWIDTH {
@@ -29,6 +29,12 @@ var (
 	// s := []byte("...") allocating [n]byte on the stack
 	// Note: the flag smallframes can update this value.
 	maxImplicitStackVarSize = int64(64 * 1024)
+
+	// smallArrayBytes is the maximum size of an array which is considered small.
+	// Small arrays will be initialized directly with a sequence of constant stores.
+	// Large arrays will be initialized by copying from a static temp.
+	// 256 bytes was chosen to minimize generated code + statictmp size.
+	smallArrayBytes = int64(256)
 )
 
 // isRuntimePkg reports whether p is package runtime.
@@ -241,8 +247,6 @@ var Ctxt *obj.Link
 
 var writearchive bool
 
-var Nacl bool
-
 var nodfp *Node
 
 var disable_checknil int
@@ -174,7 +174,7 @@ func TestIntendedInlining(t *testing.T) {
 	}
 
 	switch runtime.GOARCH {
-	case "nacl", "386", "wasm", "arm":
+	case "386", "wasm", "arm":
 	default:
 		// TODO(mvdan): As explained in /test/inline_sync.go, some
 		// architectures don't have atomic intrinsics, so these go over
@@ -187,7 +187,6 @@ func Main(archInit func(*Arch)) {
 	// pseudo-package used for methods with anonymous receivers
 	gopkg = types.NewPkg("go", "")
 
-	Nacl = objabi.GOOS == "nacl"
 	Wasm := objabi.GOARCH == "wasm"
 
 	// Whether the limit for stack-allocated objects is much smaller than normal.
@@ -495,7 +495,6 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
 
 	pragma := fun.Pragma
 	f.Func.Pragma = fun.Pragma
-	f.SetNoescape(pragma&Noescape != 0)
 	if pragma&Systemstack != 0 && pragma&Nosplit != 0 {
 		yyerrorl(f.Pos, "go:nosplit and go:systemstack cannot be combined")
 	}
@@ -507,7 +506,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
 	p.funcBody(f, fun.Body)
 
 	if fun.Body != nil {
-		if f.Noescape() {
+		if f.Func.Pragma&Noescape != 0 {
 			yyerrorl(f.Pos, "can only use //go:noescape with external func implementations")
 		}
 	} else {
@@ -1174,7 +1174,7 @@ func (o *Order) expr(n, lhs *Node) *Node {
 		}
 
 	case OCLOSURE:
-		if n.Noescape() && n.Func.Closure.Func.Cvars.Len() > 0 {
+		if n.Transient() && n.Func.Closure.Func.Cvars.Len() > 0 {
 			prealloc[n] = o.newTemp(closureType(n), false)
 		}
 
@@ -1183,7 +1183,7 @@ func (o *Order) expr(n, lhs *Node) *Node {
 		n.Right = o.expr(n.Right, nil)
 		o.exprList(n.List)
 		o.exprList(n.Rlist)
-		if n.Noescape() {
+		if n.Transient() {
 			var t *types.Type
 			switch n.Op {
 			case OSLICELIT:
@@ -1195,7 +1195,7 @@ func (o *Order) expr(n, lhs *Node) *Node {
 		}
 
 	case ODDDARG:
-		if n.Noescape() {
+		if n.Transient() {
 			// The ddd argument does not live beyond the call it is created for.
 			// Allocate a temporary that will be cleaned up when this statement
 			// completes. We could be more aggressive and try to arrange for it
@@ -582,6 +582,16 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
 	}
 }
 
+func isSmallSliceLit(n *Node) bool {
+	if n.Op != OSLICELIT {
+		return false
+	}
+
+	r := n.Right
+
+	return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64() <= smallArrayBytes/n.Type.Elem().Width)
+}
+
 func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
 	// make an array type corresponding the number of elements we have
 	t := types.NewArray(n.Type.Elem(), n.Right.Int64())
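The threshold check above boils down to simple arithmetic: element count times element width must stay within the 256-byte budget. A minimal standalone sketch (names here are illustrative, not the compiler's own API) of that size test:

package main

import "fmt"

// smallArrayBytes mirrors the 256-byte limit introduced in the diff.
const smallArrayBytes = 256

// initDirectly reports whether a slice literal backed by count elements of
// width bytes each would be small enough to initialize with constant stores
// rather than by copying from a static temporary.
func initDirectly(count, width int64) bool {
	return width == 0 || count <= smallArrayBytes/width
}

func main() {
	fmt.Println(initDirectly(32, 8)) // 256 bytes: still "small"
	fmt.Println(initDirectly(64, 8)) // 512 bytes: copied from a static temp
}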
@@ -639,7 +649,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
 	var vstat *Node
 
 	mode := getdyn(n, true)
-	if mode&initConst != 0 {
+	if mode&initConst != 0 && !isSmallSliceLit(n) {
 		vstat = staticname(t)
 		if ctxt == inInitFunction {
 			vstat.Name.SetReadonly(true)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !nacl
-
 package gc
 
 import (
@@ -513,6 +513,7 @@ func walkTypeSwitch(sw *Node) {
 	// Use a similar strategy for non-empty interfaces.
 	ifNil := nod(OIF, nil, nil)
 	ifNil.Left = nod(OEQ, itab, nodnil())
+	lineno = lineno.WithNotStmt() // disable statement marks after the first check.
 	ifNil.Left = typecheck(ifNil.Left, ctxExpr)
 	ifNil.Left = defaultlit(ifNil.Left, nil)
 	// ifNil.Nbody assigned at end.
@@ -587,20 +588,10 @@ func walkTypeSwitch(sw *Node) {
 	if defaultGoto == nil {
 		defaultGoto = br
 	}
-	if nilGoto != nil {
-		ifNil.Nbody.Set1(nilGoto)
-	} else {
-		// TODO(mdempsky): Just use defaultGoto directly.
-
-		// Jump to default case.
-		label := autolabel(".s")
-		ifNil.Nbody.Set1(nodSym(OGOTO, nil, label))
-		// Wrap default case with label.
-		blk := nod(OBLOCK, nil, nil)
-		blk.List.Set2(nodSym(OLABEL, nil, label), defaultGoto)
-		defaultGoto = blk
+	if nilGoto == nil {
+		nilGoto = defaultGoto
 	}
+	ifNil.Nbody.Set1(nilGoto)
 
 	s.Emit(&sw.Nbody)
 	sw.Nbody.Append(defaultGoto)
@@ -725,6 +716,7 @@ func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, ni
 		for i := lo; i < hi; i++ {
 			nif := nod(OIF, nil, nil)
 			base(i, nif)
+			lineno = lineno.WithNotStmt()
 			nif.Left = typecheck(nif.Left, ctxExpr)
 			nif.Left = defaultlit(nif.Left, nil)
 			out.Append(nif)
@@ -736,6 +728,7 @@ func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, ni
 		half := lo + n/2
 		nif := nod(OIF, nil, nil)
 		nif.Left = less(half)
+		lineno = lineno.WithNotStmt()
 		nif.Left = typecheck(nif.Left, ctxExpr)
 		nif.Left = defaultlit(nif.Left, nil)
 		do(lo, half, &nif.Nbody)
@@ -151,7 +151,7 @@ const (
 	_, nodeDiag // already printed error about this
 	_, nodeColas // OAS resulting from :=
 	_, nodeNonNil // guaranteed to be non-nil
-	_, nodeNoescape // func arguments do not escape; TODO(rsc): move Noescape to Func struct (see CL 7360)
+	_, nodeTransient // storage can be reused immediately after this statement
 	_, nodeBounded // bounds check unnecessary
 	_, nodeAddable // addressable
 	_, nodeHasCall // expression contains a function call
@@ -179,7 +179,7 @@ func (n *Node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
 func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 }
 func (n *Node) Colas() bool { return n.flags&nodeColas != 0 }
 func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 }
-func (n *Node) Noescape() bool { return n.flags&nodeNoescape != 0 }
+func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 }
 func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 }
 func (n *Node) Addable() bool { return n.flags&nodeAddable != 0 }
 func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 }
@@ -206,7 +206,7 @@ func (n *Node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
 func (n *Node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
 func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) }
 func (n *Node) SetNonNil(b bool) { n.flags.set(nodeNonNil, b) }
-func (n *Node) SetNoescape(b bool) { n.flags.set(nodeNoescape, b) }
+func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
 func (n *Node) SetBounded(b bool) { n.flags.set(nodeBounded, b) }
 func (n *Node) SetAddable(b bool) { n.flags.set(nodeAddable, b) }
 func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
@@ -855,13 +855,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// for sizes >= 64 generate a loop as follows:
 
 		// set up loop counter in CTR, used by BC
+		// XXLXOR VS32,VS32,VS32
 		// MOVD len/32,REG_TMP
 		// MOVD REG_TMP,CTR
+		// MOVD $16,REG_TMP
 		// loop:
-		// MOVD R0,(R3)
-		// MOVD R0,8(R3)
-		// MOVD R0,16(R3)
-		// MOVD R0,24(R3)
+		// STXVD2X VS32,(R0)(R3)
+		// STXVD2X VS32,(R31)(R3)
 		// ADD $32,R3
 		// BC 16, 0, loop
 		//
@@ -895,8 +895,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			// only generate a loop if there is more
 			// than 1 iteration.
 			if ctr > 1 {
+				// Set up VS32 (V0) to hold 0s
+				p := s.Prog(ppc64.AXXLXOR)
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = ppc64.REG_VS32
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = ppc64.REG_VS32
+				p.Reg = ppc64.REG_VS32
+
 				// Set up CTR loop counter
-				p := s.Prog(ppc64.AMOVD)
+				p = s.Prog(ppc64.AMOVD)
 				p.From.Type = obj.TYPE_CONST
 				p.From.Offset = ctr
 				p.To.Type = obj.TYPE_REG
@@ -908,23 +916,35 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 				p.To.Type = obj.TYPE_REG
 				p.To.Reg = ppc64.REG_CTR
 
-				// generate 4 MOVDs
+				// Set up R31 to hold index value 16
+				p = s.Prog(ppc64.AMOVD)
+				p.From.Type = obj.TYPE_CONST
+				p.From.Offset = 16
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = ppc64.REGTMP
+
+				// generate 2 STXVD2Xs to store 16 bytes
 				// when this is a loop then the top must be saved
 				var top *obj.Prog
-				for offset := int64(0); offset < 32; offset += 8 {
-					// This is the top of loop
-					p := s.Prog(ppc64.AMOVD)
-					p.From.Type = obj.TYPE_REG
-					p.From.Reg = ppc64.REG_R0
-					p.To.Type = obj.TYPE_MEM
-					p.To.Reg = v.Args[0].Reg()
-					p.To.Offset = offset
-					// Save the top of loop
-					if top == nil {
-						top = p
-					}
+				// This is the top of loop
+				p = s.Prog(ppc64.ASTXVD2X)
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = ppc64.REG_VS32
+				p.To.Type = obj.TYPE_MEM
+				p.To.Reg = v.Args[0].Reg()
+				p.To.Index = ppc64.REGZERO
+				// Save the top of loop
+				if top == nil {
+					top = p
 				}
 
+				p = s.Prog(ppc64.ASTXVD2X)
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = ppc64.REG_VS32
+				p.To.Type = obj.TYPE_MEM
+				p.To.Reg = v.Args[0].Reg()
+				p.To.Index = ppc64.REGTMP
+
 				// Increment address for the
 				// 4 doublewords just zeroed.
 				p = s.Prog(ppc64.AADD)
@@ -994,30 +1014,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// When moving >= 64 bytes a loop is used
 		// MOVD len/32,REG_TMP
 		// MOVD REG_TMP,CTR
+		// MOVD $16,REG_TMP
 		// top:
-		// MOVD (R4),R7
-		// MOVD 8(R4),R8
-		// MOVD 16(R4),R9
-		// MOVD 24(R4),R10
-		// ADD R4,$32
-		// MOVD R7,(R3)
-		// MOVD R8,8(R3)
-		// MOVD R9,16(R3)
-		// MOVD R10,24(R3)
-		// ADD R3,$32
+		// LXVD2X (R0)(R4),VS32
+		// LXVD2X (R31)(R4),VS33
+		// ADD $32,R4
+		// STXVD2X VS32,(R0)(R3)
+		// STXVD2X VS33,(R31)(R4)
+		// ADD $32,R3
 		// BC 16,0,top
 		// Bytes not moved by this loop are moved
 		// with a combination of the following instructions,
 		// starting with the largest sizes and generating as
 		// many as needed, using the appropriate offset value.
-		// MOVD n(R4),R7
-		// MOVD R7,n(R3)
-		// MOVW n1(R4),R7
-		// MOVW R7,n1(R3)
-		// MOVH n2(R4),R7
-		// MOVH R7,n2(R3)
-		// MOVB n3(R4),R7
-		// MOVB R7,n3(R3)
+		// MOVD n(R4),R14
+		// MOVD R14,n(R3)
+		// MOVW n1(R4),R14
+		// MOVW R14,n1(R3)
+		// MOVH n2(R4),R14
+		// MOVH R14,n2(R3)
+		// MOVB n3(R4),R14
+		// MOVB R14,n3(R3)
 
 		// Each loop iteration moves 32 bytes
 		ctr := v.AuxInt / 32
@@ -1030,7 +1047,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 
 		// The set of registers used here, must match the clobbered reg list
 		// in PPC64Ops.go.
-		useregs := []int16{ppc64.REG_R7, ppc64.REG_R8, ppc64.REG_R9, ppc64.REG_R10}
 		offset := int64(0)
 
 		// top of the loop
@@ -1050,22 +1066,35 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = ppc64.REG_CTR
 
-			// Generate all the MOVDs for loads
-			// based off the same register, increasing
-			// the offset by 8 for each instruction
-			for _, rg := range useregs {
-				p := s.Prog(ppc64.AMOVD)
-				p.From.Type = obj.TYPE_MEM
-				p.From.Reg = src_reg
-				p.From.Offset = offset
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = rg
-				if top == nil {
-					top = p
-				}
-				offset += 8
+			// Use REGTMP as index reg
+			p = s.Prog(ppc64.AMOVD)
+			p.From.Type = obj.TYPE_CONST
+			p.From.Offset = 16
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = ppc64.REGTMP
+
+			// Generate 16 byte loads and stores.
+			// Use temp register for index (16)
+			// on the second one.
+			p = s.Prog(ppc64.ALXVD2X)
+			p.From.Type = obj.TYPE_MEM
+			p.From.Reg = src_reg
+			p.From.Index = ppc64.REGZERO
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = ppc64.REG_VS32
+
+			if top == nil {
+				top = p
 			}
-			// increment the src_reg for next iteration
+
+			p = s.Prog(ppc64.ALXVD2X)
+			p.From.Type = obj.TYPE_MEM
+			p.From.Reg = src_reg
+			p.From.Index = ppc64.REGTMP
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = ppc64.REG_VS33
+
+			// increment the src reg for next iteration
 			p = s.Prog(ppc64.AADD)
 			p.Reg = src_reg
 			p.From.Type = obj.TYPE_CONST
@@ -1073,20 +1102,22 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = src_reg
 
-			// generate the MOVDs for stores, based
-			// off the same register, using the same
-			// offsets as in the loads.
-			offset = int64(0)
-			for _, rg := range useregs {
-				p := s.Prog(ppc64.AMOVD)
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = rg
-				p.To.Type = obj.TYPE_MEM
-				p.To.Reg = dst_reg
-				p.To.Offset = offset
-				offset += 8
-			}
-			// increment the dst_reg for next iteration
+			// generate 16 byte stores
+			p = s.Prog(ppc64.ASTXVD2X)
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = ppc64.REG_VS32
+			p.To.Type = obj.TYPE_MEM
+			p.To.Reg = dst_reg
+			p.To.Index = ppc64.REGZERO
+
+			p = s.Prog(ppc64.ASTXVD2X)
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = ppc64.REG_VS33
+			p.To.Type = obj.TYPE_MEM
+			p.To.Reg = dst_reg
+			p.To.Index = ppc64.REGTMP
+
+			// increment the dst reg for next iteration
 			p = s.Prog(ppc64.AADD)
 			p.Reg = dst_reg
 			p.From.Type = obj.TYPE_CONST
@@ -1114,6 +1145,57 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			rem += 32
 		}
 
+		if rem >= 16 {
+			// Generate 16 byte loads and stores.
+			// Use temp register for index (value 16)
+			// on the second one.
+			p := s.Prog(ppc64.ALXVD2X)
+			p.From.Type = obj.TYPE_MEM
+			p.From.Reg = src_reg
+			p.From.Index = ppc64.REGZERO
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = ppc64.REG_VS32
+
+			p = s.Prog(ppc64.ASTXVD2X)
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = ppc64.REG_VS32
+			p.To.Type = obj.TYPE_MEM
+			p.To.Reg = dst_reg
+			p.To.Index = ppc64.REGZERO
+
+			offset = 16
+			rem -= 16
+
+			if rem >= 16 {
+				// Use REGTMP as index reg
+				p = s.Prog(ppc64.AMOVD)
+				p.From.Type = obj.TYPE_CONST
+				p.From.Offset = 16
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = ppc64.REGTMP
+
+				// Generate 16 byte loads and stores.
+				// Use temp register for index (16)
+				// on the second one.
+				p = s.Prog(ppc64.ALXVD2X)
+				p.From.Type = obj.TYPE_MEM
+				p.From.Reg = src_reg
+				p.From.Index = ppc64.REGTMP
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = ppc64.REG_VS32
+
+				p = s.Prog(ppc64.ASTXVD2X)
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = ppc64.REG_VS32
+				p.To.Type = obj.TYPE_MEM
+				p.To.Reg = dst_reg
+				p.To.Index = ppc64.REGTMP
+
+				offset = 32
+				rem -= 16
+			}
+		}
+
 		// Generate all the remaining load and store pairs, starting with
 		// as many 8 byte moves as possible, then 4, 2, 1.
 		for rem > 0 {
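The remainder handling added above follows a simple greedy schedule: up to two 16-byte vector moves, then 8/4/2/1-byte moves until nothing is left. The standalone sketch below models only that size bookkeeping, not the emitted ppc64 instructions; all names are illustrative.

package main

import "fmt"

// chunkMoves returns the move sizes used to consume the remainder left
// after the 32-byte copy loop, mirroring the order in the diff.
func chunkMoves(rem int64) []int64 {
	var sizes []int64
	for i := 0; i < 2 && rem >= 16; i++ {
		sizes = append(sizes, 16)
		rem -= 16
	}
	for _, s := range []int64{8, 4, 2, 1} {
		for rem >= s {
			sizes = append(sizes, s)
			rem -= s
		}
	}
	return sizes
}

func main() {
	fmt.Println(chunkMoves(23)) // [16 4 2 1]
}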
@@ -1129,7 +1211,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			// Load
 			p := s.Prog(op)
 			p.To.Type = obj.TYPE_REG
-			p.To.Reg = ppc64.REG_R7
+			p.To.Reg = ppc64.REG_R14
 			p.From.Type = obj.TYPE_MEM
 			p.From.Reg = src_reg
 			p.From.Offset = offset
@@ -1137,7 +1219,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			// Store
 			p = s.Prog(op)
 			p.From.Type = obj.TYPE_REG
-			p.From.Reg = ppc64.REG_R7
+			p.From.Reg = ppc64.REG_R14
 			p.To.Type = obj.TYPE_MEM
 			p.To.Reg = dst_reg
 			p.To.Offset = offset
@@ -38,18 +38,14 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 
 	// Generate a loop of large clears.
 	if cnt > clearLoopCutoff {
-		n := cnt - (cnt % 256)
-		end := int16(s390x.REGRT2)
-		p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, off+n, obj.TYPE_REG, end, 0)
-		p.Reg = reg
+		ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
+		p = pp.Appendpp(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
 		p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
 		pl := p
 		p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
-		p = pp.Appendpp(p, s390x.ACMP, obj.TYPE_REG, reg, 0, obj.TYPE_REG, end, 0)
-		p = pp.Appendpp(p, s390x.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p = pp.Appendpp(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
 		gc.Patch(p, pl)
-		cnt -= n
+		cnt = cnt % 256
 	}
 
 	// Generate remaining clear instructions without a loop.
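The rewritten loop above is driven by a precomputed iteration count (a BRCTG-style counted branch) plus a non-loop tail. A minimal sketch of the arithmetic only, with illustrative names rather than the compiler's API:

package main

import "fmt"

// clearPlan splits a clear of cnt bytes into counted 256-byte loop
// iterations and a trailing remainder cleared without a loop.
func clearPlan(cnt int64) (iterations, remainder int64) {
	return cnt / 256, cnt % 256
}

func main() {
	it, rem := clearPlan(1000)
	fmt.Printf("%d loop iterations, %d trailing bytes\n", it, rem) // 3, 232
}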
@@ -814,7 +814,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 	}
 }
 
+func blockAsm(b *ssa.Block) obj.As {
+	switch b.Kind {
+	case ssa.BlockS390XBRC:
+		return s390x.ABRC
+	case ssa.BlockS390XCRJ:
+		return s390x.ACRJ
+	case ssa.BlockS390XCGRJ:
+		return s390x.ACGRJ
+	case ssa.BlockS390XCLRJ:
+		return s390x.ACLRJ
+	case ssa.BlockS390XCLGRJ:
+		return s390x.ACLGRJ
+	case ssa.BlockS390XCIJ:
+		return s390x.ACIJ
+	case ssa.BlockS390XCGIJ:
+		return s390x.ACGIJ
+	case ssa.BlockS390XCLIJ:
+		return s390x.ACLIJ
+	case ssa.BlockS390XCLGIJ:
+		return s390x.ACLGIJ
+	}
+	b.Fatalf("blockAsm not implemented: %s", b.LongString())
+	panic("unreachable")
+}
+
 func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+	// Handle generic blocks first.
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
@@ -822,47 +848,73 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
 			p.To.Type = obj.TYPE_BRANCH
 			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
 		}
+		return
 	case ssa.BlockDefer:
 		// defer returns in R3:
 		// 0 if we should continue executing
 		// 1 if we should jump to deferreturn call
-		p := s.Prog(s390x.ACMPW)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = s390x.REG_R3
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = 0
-		p = s.Prog(s390x.ABNE)
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		p := s.Br(s390x.ACIJ, b.Succs[1].Block())
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible
+		p.Reg = s390x.REG_R3
+		p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: 0}}
 		if b.Succs[0].Block() != next {
-			p := s.Prog(s390x.ABR)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Br(s390x.ABR, b.Succs[0].Block())
 		}
+		return
 	case ssa.BlockExit:
+		return
 	case ssa.BlockRet:
 		s.Prog(obj.ARET)
+		return
 	case ssa.BlockRetJmp:
 		p := s.Prog(s390x.ABR)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = b.Aux.(*obj.LSym)
+		return
+	}
+
+	// Handle s390x-specific blocks. These blocks all have a
+	// condition code mask in the Aux value and 2 successors.
+	succs := [...]*ssa.Block{b.Succs[0].Block(), b.Succs[1].Block()}
+	mask := b.Aux.(s390x.CCMask)
+
+	// TODO: take into account Likely property for forward/backward
+	// branches. We currently can't do this because we don't know
+	// whether a block has already been emitted. In general forward
+	// branches are assumed 'not taken' and backward branches are
+	// assumed 'taken'.
+	if next == succs[0] {
+		succs[0], succs[1] = succs[1], succs[0]
+		mask = mask.Inverse()
+	}
+
+	p := s.Br(blockAsm(b), succs[0])
+	switch b.Kind {
 	case ssa.BlockS390XBRC:
-		succs := [...]*ssa.Block{b.Succs[0].Block(), b.Succs[1].Block()}
-		mask := b.Aux.(s390x.CCMask)
-		if next == succs[0] {
-			succs[0], succs[1] = succs[1], succs[0]
-			mask = mask.Inverse()
-		}
-		// TODO: take into account Likely property for forward/backward
-		// branches.
-		p := s.Br(s390x.ABRC, succs[0])
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = int64(mask)
-		if next != succs[1] {
-			s.Br(s390x.ABR, succs[1])
-		}
+	case ssa.BlockS390XCGRJ, ssa.BlockS390XCRJ,
+		ssa.BlockS390XCLGRJ, ssa.BlockS390XCLRJ:
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+		p.Reg = b.Controls[0].Reg()
+		p.RestArgs = []obj.Addr{{Type: obj.TYPE_REG, Reg: b.Controls[1].Reg()}}
+	case ssa.BlockS390XCGIJ, ssa.BlockS390XCIJ:
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+		p.Reg = b.Controls[0].Reg()
+		p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: int64(int8(b.AuxInt))}}
+	case ssa.BlockS390XCLGIJ, ssa.BlockS390XCLIJ:
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+		p.Reg = b.Controls[0].Reg()
+		p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: int64(uint8(b.AuxInt))}}
 	default:
 		b.Fatalf("branch not implemented: %s", b.LongString())
 	}
+	if next != succs[1] {
+		s.Br(s390x.ABR, succs[1])
+	}
 }
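The shared successor-swapping logic hoisted out of the BRC case above is a general trick: if the preferred successor is the fallthrough block, invert the condition mask and branch to the other successor, saving an unconditional jump. The following standalone sketch uses a toy condition-mask type (not the real s390x.CCMask) to show only that control-flow decision.

package main

import "fmt"

// ccMask is a stand-in for a 4-bit condition-code mask.
type ccMask uint8

const allConds ccMask = 0xf

func (m ccMask) inverse() ccMask { return m ^ allConds }

// emitBranch decides which successor to branch to: if the first successor is
// the fallthrough block, swap successors and invert the mask; an extra
// unconditional jump is only needed when the second successor is not next.
func emitBranch(mask ccMask, succs [2]string, next string) (cond string, extraJump bool) {
	if next == succs[0] {
		succs[0], succs[1] = succs[1], succs[0]
		mask = mask.inverse()
	}
	return fmt.Sprintf("branch %04b -> %s", mask, succs[0]), next != succs[1]
}

func main() {
	cond, jump := emitBranch(0b0001, [2]string{"b1", "b2"}, "b1")
	fmt.Println(cond, "extra jump needed:", jump) // branch 1110 -> b2 extra jump needed: false
}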
@@ -52,7 +52,8 @@ type Block struct {
 	Controls [2]*Value
 
 	// Auxiliary info for the block. Its value depends on the Kind.
 	Aux interface{}
+	AuxInt int64
 
 	// The unordered set of Values that define the operation of this block.
 	// After the scheduling pass, this list is ordered.
@@ -118,7 +119,17 @@ func (b *Block) String() string {
 func (b *Block) LongString() string {
 	s := b.Kind.String()
 	if b.Aux != nil {
-		s += fmt.Sprintf(" %s", b.Aux)
+		s += fmt.Sprintf(" {%s}", b.Aux)
+	}
+	if t := b.Kind.AuxIntType(); t != "" {
+		switch t {
+		case "Int8":
+			s += fmt.Sprintf(" [%v]", int8(b.AuxInt))
+		case "UInt8":
+			s += fmt.Sprintf(" [%v]", uint8(b.AuxInt))
+		default:
+			s += fmt.Sprintf(" [%v]", b.AuxInt)
+		}
 	}
 	for _, c := range b.ControlValues() {
 		s += fmt.Sprintf(" %s", c)
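The formatting change above narrows a 64-bit AuxInt to the width the block kind declares before printing it. A minimal, standalone sketch of that narrowing (the type-name strings are assumptions matching the diff, not a documented API):

package main

import "fmt"

// auxIntString renders an aux int according to its declared 8-bit type,
// so the same stored value prints as -1 for Int8 but 255 for UInt8.
func auxIntString(typ string, auxInt int64) string {
	switch typ {
	case "Int8":
		return fmt.Sprintf(" [%v]", int8(auxInt))
	case "UInt8":
		return fmt.Sprintf(" [%v]", uint8(auxInt))
	default:
		return fmt.Sprintf(" [%v]", auxInt)
	}
}

func main() {
	fmt.Println(auxIntString("Int8", 255))  // [-1]
	fmt.Println(auxIntString("UInt8", 255)) // [255]
}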
@@ -211,6 +222,16 @@ func (b *Block) CopyControls(from *Block) {
 	}
 }
 
+// Reset sets the block to the provided kind and clears all the blocks control
+// and auxilliary values. Other properties of the block, such as its successors,
+// predecessors and values are left unmodified.
+func (b *Block) Reset(kind BlockKind) {
+	b.Kind = kind
+	b.ResetControls()
+	b.Aux = nil
+	b.AuxInt = 0
+}
+
 // AddEdgeTo adds an edge from block b to block c. Used during building of the
 // SSA graph; do not use on an already-completed SSA graph.
 func (b *Block) AddEdgeTo(c *Block) {
@@ -38,7 +38,6 @@ type Config struct {
 	useSSE bool // Use SSE for non-float operations
 	useAvg bool // Use optimizations that need Avg* operations
 	useHmul bool // Use optimizations that need Hmul* operations
-	nacl bool // GOOS=nacl
 	use387 bool // GO386=387
 	SoftFloat bool //
 	Race bool // race detector enabled
@@ -211,19 +210,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
 		c.FPReg = framepointerRegAMD64
 		c.LinkReg = linkRegAMD64
 		c.hasGReg = false
-	case "amd64p32":
-		c.PtrSize = 4
-		c.RegSize = 8
-		c.lowerBlock = rewriteBlockAMD64
-		c.lowerValue = rewriteValueAMD64
-		c.splitLoad = rewriteValueAMD64splitload
-		c.registers = registersAMD64[:]
-		c.gpRegMask = gpRegMaskAMD64
-		c.fpRegMask = fpRegMaskAMD64
-		c.FPReg = framepointerRegAMD64
-		c.LinkReg = linkRegAMD64
-		c.hasGReg = false
-		c.noDuffDevice = true
 	case "386":
 		c.PtrSize = 4
 		c.RegSize = 4
@@ -339,7 +325,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
 	}
 	c.ctxt = ctxt
 	c.optimize = optimize
-	c.nacl = objabi.GOOS == "nacl"
 	c.useSSE = true
 
 	// Don't use Duff's device nor SSE on Plan 9 AMD64, because
@@ -349,17 +334,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
 		c.useSSE = false
 	}
 
-	if c.nacl {
-		c.noDuffDevice = true // Don't use Duff's device on NaCl
-
-		// Returns clobber BP on nacl/386, so the write
-		// barrier does.
-		opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 5 // BP
-
-		// ... and SI on nacl/amd64.
-		opcodeTable[OpAMD64LoweredWB].reg.clobbers |= 1 << 6 // SI
-	}
-
 	if ctxt.Flag_shared {
 		// LoweredWB is secretly a CALL and CALLs on 386 in
 		// shared mode get rewritten by obj6.go to go through
@@ -596,32 +596,32 @@
 // into tests for carry flags.
 // ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
 // mutandis, for UGE and SETAE, and CC and SETCC.
-((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTL x y))
-((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTQ x y))
-((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> ((ULT|UGE) (BTL x y))
+((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> ((ULT|UGE) (BTQ x y))
+((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c)
    -> ((ULT|UGE) (BTLconst [log2uint32(c)] x))
-((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c)
    -> ((ULT|UGE) (BTQconst [log2(c)] x))
-((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
    -> ((ULT|UGE) (BTQconst [log2(c)] x))
-(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SET(B|AE) (BTL x y))
-(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SET(B|AE) (BTQ x y))
-(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> (SET(B|AE) (BTL x y))
+(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> (SET(B|AE) (BTQ x y))
+(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c)
    -> (SET(B|AE) (BTLconst [log2uint32(c)] x))
-(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c)
    -> (SET(B|AE) (BTQconst [log2(c)] x))
-(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
    -> (SET(B|AE) (BTQconst [log2(c)] x))
 // SET..store variant
-(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
    -> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
    -> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c)
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c)
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
 
 // Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
@@ -641,29 +641,29 @@
 (SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) -> (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)
 
 // Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
-(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTS(Q|L) x y)
-(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTC(Q|L) x y)
+(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTS(Q|L) x y)
+(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTC(Q|L) x y)
 
 // Convert ORconst into BTS, if the code gets smaller, with boundary being
 // (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
-((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
    -> (BT(S|C)Qconst [log2(c)] x)
-((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128
    -> (BT(S|C)Lconst [log2uint32(c)] x)
-((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
    -> (BT(S|C)Qconst [log2(c)] x)
-((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128
    -> (BT(S|C)Lconst [log2uint32(c)] x)
 
 // Recognize bit clearing: a &^= 1<<b
-(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) && !config.nacl -> (BTR(Q|L) x y)
-(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) -> (BTR(Q|L) x y)
+(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
    -> (BTRQconst [log2(^c)] x)
-(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128
    -> (BTRLconst [log2uint32(^c)] x)
-(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
    -> (BTRQconst [log2(^c)] x)
-(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128
    -> (BTRLconst [log2uint32(^c)] x)
 
 // Special-case bit patterns on first/last bit.
@@ -677,40 +677,40 @@
 // We thus special-case them, by detecting the shift patterns.
 
 // Special case resetting first/last bit
-(SHL(L|Q)const [1] (SHR(L|Q)const [1] x)) && !config.nacl
+(SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
    -> (BTR(L|Q)const [0] x)
-(SHRLconst [1] (SHLLconst [1] x)) && !config.nacl
+(SHRLconst [1] (SHLLconst [1] x))
    -> (BTRLconst [31] x)
-(SHRQconst [1] (SHLQconst [1] x)) && !config.nacl
+(SHRQconst [1] (SHLQconst [1] x))
    -> (BTRQconst [63] x)
 
 // Special case testing first/last bit (with double-shift generated by generic.rules)
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
 
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)
 
 // Special-case manually testing last bit with "a>>63 != 0" (without "&1")
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
 
 // Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
||||||
|
|
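The first/last-bit cases above correspond to sign-bit and low-bit tests in source code. A small, hedged Go sketch of the forms meant (names are illustrative; the exact lowering depends on the surrounding code):

package signbit

// hasHighBitSet is the "a>>63 != 0" form handled directly above.
func hasHighBitSet(a uint64) bool {
	return a>>63 != 0
}

// lowBitViaShifts writes the bit-0 test in the double-shift form that the
// rules above match: a<<63>>63 isolates the low bit of a uint64.
func lowBitViaShifts(a uint64) bool {
	return a<<63>>63 != 0
}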
@ -1246,20 +1246,20 @@
 ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x)

 // use indexed loads and stores
-(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVWloadidx ptr idx mem)
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVWloadidx ptr idx mem)
-(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVWstoreidx ptr idx val mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVWstoreidx ptr idx val mem)
-(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftLL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRA ptr idx [c] mem)
-(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftLL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRA ptr idx [c] val mem)
-(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBUloadidx ptr idx mem)
+(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVBUloadidx ptr idx mem)
-(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBloadidx ptr idx mem)
+(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVBloadidx ptr idx mem)
-(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVBstoreidx ptr idx val mem)
+(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVBstoreidx ptr idx val mem)
-(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHUloadidx ptr idx mem)
+(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVHUloadidx ptr idx mem)
-(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHloadidx ptr idx mem)
+(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVHloadidx ptr idx mem)
-(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVHstoreidx ptr idx val mem)
+(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVHstoreidx ptr idx val mem)

 // constant folding in indexed loads and stores
 (MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
@ -416,13 +416,13 @@ func init() {
 // a loop is generated when there is more than one iteration
 // needed to clear 4 doublewords
 //
+// XXLXOR VS32,VS32,VS32
 // MOVD $len/32,R31
 // MOVD R31,CTR
+// MOVD $16,R31
 // loop:
-// MOVD R0,(R3)
-// MOVD R0,8(R3)
-// MOVD R0,16(R3)
-// MOVD R0,24(R3)
+// STXVD2X VS32,(R0)(R3)
+// STXVD2X VS32,(R31)(R3)
 // ADD R3,32
 // BC loop
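As an illustration, the zero loop described above is what large, constant-size zeroings compile down to on ppc64/ppc64le. A hedged Go sketch; the type and size are chosen only for the example, and the exact threshold at which the loop form is used is decided by the lowering rules:

package zeroing

type block struct {
	buf [256]byte
}

// reset zeroes a 256-byte value; bulk zeroing of this kind is what the
// XXLXOR/STXVD2X loop in the comment above can be generated for.
func reset(b *block) {
	*b = block{}
}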
@ -448,33 +448,38 @@ func init() {
 typ: "Mem",
 faultOnNilArg0: true,
 },
+// R31 is temp register
 // Loop code:
-// MOVD len/32,REG_TMP only for loop
-// MOVD REG_TMP,CTR only for loop
+// MOVD len/32,R31 set up loop ctr
+// MOVD R31,CTR
+// MOVD $16,R31 index register
 // loop:
-// MOVD (R4),R7
-// MOVD 8(R4),R8
-// MOVD 16(R4),R9
-// MOVD 24(R4),R10
-// ADD R4,$32 only with loop
-// MOVD R7,(R3)
-// MOVD R8,8(R3)
-// MOVD R9,16(R3)
-// MOVD R10,24(R3)
-// ADD R3,$32 only with loop
-// BC 16,0,loop only with loop
+// LXVD2X (R0)(R4),VS32
+// LXVD2X (R31)(R4),VS33
+// ADD R4,$32 increment src
+// STXVD2X VS32,(R0)(R3)
+// STXVD2X VS33,(R31)(R3)
+// ADD R3,$32 increment dst
+// BC 16,0,loop branch ctr
+// For this purpose, VS32 and VS33 are treated as
+// scratch registers. Since regalloc does not
+// track vector registers, even if it could be marked
+// as clobbered it would have no effect.
+// TODO: If vector registers are managed by regalloc
+// mark these as clobbered.
+//
 // Bytes not moved by this loop are moved
 // with a combination of the following instructions,
 // starting with the largest sizes and generating as
 // many as needed, using the appropriate offset value.
-// MOVD n(R4),R7
-// MOVD R7,n(R3)
-// MOVW n1(R4),R7
-// MOVW R7,n1(R3)
-// MOVH n2(R4),R7
-// MOVH R7,n2(R3)
-// MOVB n3(R4),R7
-// MOVB R7,n3(R3)
+// MOVD n(R4),R14
+// MOVD R14,n(R3)
+// MOVW n1(R4),R14
+// MOVW R14,n1(R3)
+// MOVH n2(R4),R14
+// MOVH R14,n2(R3)
+// MOVB n3(R4),R14
+// MOVB R14,n3(R3)

 {
 name: "LoweredMove",
@ -482,7 +487,7 @@ func init() {
 argLength: 3,
 reg: regInfo{
 inputs: []regMask{buildReg("R3"), buildReg("R4")},
-clobbers: buildReg("R3 R4 R7 R8 R9 R10"),
+clobbers: buildReg("R3 R4 R14"),
 },
 clobberFlags: true,
 typ: "Mem",
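A hedged illustration of the kind of Go code whose lowering uses the move loop above, and that therefore now clobbers R14 and the VS32/VS33 scratch registers rather than R7-R10. The struct size is chosen only for the example:

package moving

type payload struct {
	data [192]byte
}

// copyPayload performs a fixed-size bulk copy; on ppc64/ppc64le moves of
// this size can be lowered to the LXVD2X/STXVD2X loop described above.
func copyPayload(dst, src *payload) {
	*dst = *src
}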
@ -416,7 +416,7 @@
 (ITab (Load ptr mem)) -> (MOVDload ptr mem)

 // block rewrites
-(If cond yes no) -> (BRC {s390x.NotEqual} (CMPWconst [0] (MOVBZreg <typ.Bool> cond)) yes no)
+(If cond yes no) -> (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)

 // Write barrier.
 (WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)
|
||||||
-> x
|
-> x
|
||||||
|
|
||||||
// Fold boolean tests into blocks.
|
// Fold boolean tests into blocks.
|
||||||
(BRC {c} (CMPWconst [0] (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp)) yes no)
|
// Note: this must match If statement lowering.
|
||||||
&& x != 0
|
(CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
|
||||||
&& c.(s390x.CCMask) == s390x.Equal
|
&& int32(x) != 0
|
||||||
-> (BRC {d} cmp no yes)
|
|
||||||
(BRC {c} (CMPWconst [0] (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp)) yes no)
|
|
||||||
&& x != 0
|
|
||||||
&& c.(s390x.CCMask) == s390x.NotEqual
|
|
||||||
-> (BRC {d} cmp yes no)
|
-> (BRC {d} cmp yes no)
|
||||||
|
|
||||||
|
// Compare-and-branch.
|
||||||
|
// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
|
||||||
|
(BRC {c} (CMP x y) yes no) -> (CGRJ {c.(s390x.CCMask)&^s390x.Unordered} x y yes no)
|
||||||
|
(BRC {c} (CMPW x y) yes no) -> (CRJ {c.(s390x.CCMask)&^s390x.Unordered} x y yes no)
|
||||||
|
(BRC {c} (CMPU x y) yes no) -> (CLGRJ {c.(s390x.CCMask)&^s390x.Unordered} x y yes no)
|
||||||
|
(BRC {c} (CMPWU x y) yes no) -> (CLRJ {c.(s390x.CCMask)&^s390x.Unordered} x y yes no)
|
||||||
|
|
||||||
|
// Compare-and-branch (immediate).
|
||||||
|
// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
|
||||||
|
(BRC {c} (CMPconst x [y]) yes no) && is8Bit(y) -> (CGIJ {c.(s390x.CCMask)&^s390x.Unordered} x [int64(int8(y))] yes no)
|
||||||
|
(BRC {c} (CMPWconst x [y]) yes no) && is8Bit(y) -> (CIJ {c.(s390x.CCMask)&^s390x.Unordered} x [int64(int8(y))] yes no)
|
||||||
|
(BRC {c} (CMPUconst x [y]) yes no) && isU8Bit(y) -> (CLGIJ {c.(s390x.CCMask)&^s390x.Unordered} x [int64(int8(y))] yes no)
|
||||||
|
(BRC {c} (CMPWUconst x [y]) yes no) && isU8Bit(y) -> (CLIJ {c.(s390x.CCMask)&^s390x.Unordered} x [int64(int8(y))] yes no)
|
||||||
|
|
||||||
|
// Absorb immediate into compare-and-branch.
|
||||||
|
(C(R|GR)J {c} x (MOVDconst [y]) yes no) && is8Bit(y) -> (C(I|GI)J {c} x [int64(int8(y))] yes no)
|
||||||
|
(CL(R|GR)J {c} x (MOVDconst [y]) yes no) && isU8Bit(y) -> (CL(I|GI)J {c} x [int64(int8(y))] yes no)
|
||||||
|
(C(R|GR)J {c} (MOVDconst [x]) y yes no) && is8Bit(x) -> (C(I|GI)J {c.(s390x.CCMask).ReverseComparison()} y [int64(int8(x))] yes no)
|
||||||
|
(CL(R|GR)J {c} (MOVDconst [x]) y yes no) && isU8Bit(x) -> (CL(I|GI)J {c.(s390x.CCMask).ReverseComparison()} y [int64(int8(x))] yes no)
|
||||||
|
|
||||||
|
// Prefer comparison with immediate to compare-and-branch.
|
||||||
|
(CGRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) -> (BRC {c} (CMPconst x [int64(int32(y))]) yes no)
|
||||||
|
(CRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) -> (BRC {c} (CMPWconst x [int64(int32(y))]) yes no)
|
||||||
|
(CLGRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) -> (BRC {c} (CMPUconst x [int64(int32(y))]) yes no)
|
||||||
|
(CLRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) -> (BRC {c} (CMPWUconst x [int64(int32(y))]) yes no)
|
||||||
|
(CGRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) -> (BRC {c.(s390x.CCMask).ReverseComparison()} (CMPconst y [int64(int32(x))]) yes no)
|
||||||
|
(CRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) -> (BRC {c.(s390x.CCMask).ReverseComparison()} (CMPWconst y [int64(int32(x))]) yes no)
|
||||||
|
(CLGRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) -> (BRC {c.(s390x.CCMask).ReverseComparison()} (CMPUconst y [int64(int32(x))]) yes no)
|
||||||
|
(CLRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) -> (BRC {c.(s390x.CCMask).ReverseComparison()} (CMPWUconst y [int64(int32(x))]) yes no)
|
||||||
|
|
||||||
|
// Absorb sign/zero extensions into 32-bit compare-and-branch.
|
||||||
|
(CIJ {c} (MOV(W|WZ)reg x) [y] yes no) -> (CIJ {c} x [y] yes no)
|
||||||
|
(CLIJ {c} (MOV(W|WZ)reg x) [y] yes no) -> (CLIJ {c} x [y] yes no)
|
||||||
|
|
||||||
|
// Bring out-of-range signed immediates into range by varying branch condition.
|
||||||
|
(BRC {s390x.Less} (CMPconst x [ 128]) yes no) -> (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
|
||||||
|
(BRC {s390x.Less} (CMPWconst x [ 128]) yes no) -> (CIJ {s390x.LessOrEqual} x [ 127] yes no)
|
||||||
|
(BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no) -> (CGIJ {s390x.Less} x [-128] yes no)
|
||||||
|
(BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no) -> (CIJ {s390x.Less} x [-128] yes no)
|
||||||
|
(BRC {s390x.Greater} (CMPconst x [-129]) yes no) -> (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
|
||||||
|
(BRC {s390x.Greater} (CMPWconst x [-129]) yes no) -> (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
|
||||||
|
(BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no) -> (CGIJ {s390x.Greater} x [ 127] yes no)
|
||||||
|
(BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) -> (CIJ {s390x.Greater} x [ 127] yes no)
|
||||||
|
|
||||||
|
// Bring out-of-range unsigned immediates into range by varying branch condition.
|
||||||
|
// Note: int64(int8(255)) == -1
|
||||||
|
(BRC {s390x.Less} (CMP(WU|U)const x [256]) yes no) -> (C(L|LG)IJ {s390x.LessOrEqual} x [-1] yes no)
|
||||||
|
(BRC {s390x.GreaterOrEqual} (CMP(WU|U)const x [256]) yes no) -> (C(L|LG)IJ {s390x.Greater} x [-1] yes no)
|
||||||
|
|
||||||
// Fold constants into instructions.
|
// Fold constants into instructions.
|
||||||
(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
|
(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
|
||||||
(ADDW x (MOVDconst [c])) -> (ADDWconst [int64(int32(c))] x)
|
(ADDW x (MOVDconst [c])) -> (ADDWconst [int64(int32(c))] x)
|
||||||
|
|
@ -959,6 +1004,40 @@
|
||||||
(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) -> (FlagLT)
|
(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) -> (FlagLT)
|
||||||
(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) -> (FlagLT)
|
(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) -> (FlagLT)
|
||||||
|
|
||||||
|
// Constant compare-and-branch with immediate.
|
||||||
|
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Equal != 0 && int64(x) == int64( int8(y)) -> (First yes no)
|
||||||
|
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Less != 0 && int64(x) < int64( int8(y)) -> (First yes no)
|
||||||
|
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Greater != 0 && int64(x) > int64( int8(y)) -> (First yes no)
|
||||||
|
(CIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Equal != 0 && int32(x) == int32( int8(y)) -> (First yes no)
|
||||||
|
(CIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Less != 0 && int32(x) < int32( int8(y)) -> (First yes no)
|
||||||
|
(CIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Greater != 0 && int32(x) > int32( int8(y)) -> (First yes no)
|
||||||
|
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Equal != 0 && uint64(x) == uint64(uint8(y)) -> (First yes no)
|
||||||
|
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Less != 0 && uint64(x) < uint64(uint8(y)) -> (First yes no)
|
||||||
|
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Greater != 0 && uint64(x) > uint64(uint8(y)) -> (First yes no)
|
||||||
|
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Equal != 0 && uint32(x) == uint32(uint8(y)) -> (First yes no)
|
||||||
|
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Less != 0 && uint32(x) < uint32(uint8(y)) -> (First yes no)
|
||||||
|
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Greater != 0 && uint32(x) > uint32(uint8(y)) -> (First yes no)
|
||||||
|
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Equal == 0 && int64(x) == int64( int8(y)) -> (First no yes)
|
||||||
|
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Less == 0 && int64(x) < int64( int8(y)) -> (First no yes)
|
||||||
|
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Greater == 0 && int64(x) > int64( int8(y)) -> (First no yes)
|
||||||
|
(CIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Equal == 0 && int32(x) == int32( int8(y)) -> (First no yes)
|
||||||
|
(CIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Less == 0 && int32(x) < int32( int8(y)) -> (First no yes)
|
||||||
|
(CIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Greater == 0 && int32(x) > int32( int8(y)) -> (First no yes)
|
||||||
|
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Equal == 0 && uint64(x) == uint64(uint8(y)) -> (First no yes)
|
||||||
|
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Less == 0 && uint64(x) < uint64(uint8(y)) -> (First no yes)
|
||||||
|
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Greater == 0 && uint64(x) > uint64(uint8(y)) -> (First no yes)
|
||||||
|
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Equal == 0 && uint32(x) == uint32(uint8(y)) -> (First no yes)
|
||||||
|
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Less == 0 && uint32(x) < uint32(uint8(y)) -> (First no yes)
|
||||||
|
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c.(s390x.CCMask)&s390x.Greater == 0 && uint32(x) > uint32(uint8(y)) -> (First no yes)
|
||||||
|
|
||||||
|
// Constant compare-and-branch with immediate when unsigned comparison with zero.
|
||||||
|
(C(L|LG)IJ {s390x.GreaterOrEqual} _ [0] yes no) -> (First yes no)
|
||||||
|
(C(L|LG)IJ {s390x.Less} _ [0] yes no) -> (First no yes)
|
||||||
|
|
||||||
|
// Constant compare-and-branch when operands match.
|
||||||
|
(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c.(s390x.CCMask)&s390x.Equal != 0 -> (First yes no)
|
||||||
|
(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c.(s390x.CCMask)&s390x.Equal == 0 -> (First no yes)
|
||||||
|
|
||||||
// Convert 64-bit comparisons to 32-bit comparisons and signed comparisons
|
// Convert 64-bit comparisons to 32-bit comparisons and signed comparisons
|
||||||
// to unsigned comparisons.
|
// to unsigned comparisons.
|
||||||
// Helps simplify constant comparison detection.
|
// Helps simplify constant comparison detection.
|
||||||
|
|
|
||||||
|
|
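For orientation, the rules above fuse an explicit compare followed by a conditional branch into one compare-and-branch block. A hedged Go sketch of code whose loop condition produces exactly that compare-plus-branch shape; the functions are illustrative, and whether CGRJ or CGIJ is chosen depends on whether the bound is a register or a small constant:

package cbranch

// sumSmall compares the induction variable against a small constant,
// the shape the (BRC (CMPconst x [y])) && is8Bit(y) rule can turn into a CGIJ.
func sumSmall() int {
	s := 0
	for i := 0; i < 10; i++ {
		s += 3
	}
	return s
}

// sumN compares two registers, the (BRC (CMP x y)) -> CGRJ shape.
func sumN(n int) int {
	s := 0
	for i := 0; i < n; i++ {
		s += 3
	}
	return s
}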
@ -707,8 +707,41 @@ func init() {
 },
 }

+// All blocks on s390x have their condition code mask (s390x.CCMask) as the Aux value.
+// The condition code mask is a 4-bit mask where each bit corresponds to a condition
+// code value. If the value of the condition code matches a bit set in the condition
+// code mask then the first successor is executed. Otherwise the second successor is
+// executed.
+//
+// | condition code value | mask bit   |
+// +----------------------+------------+
+// | 0 (equal)            | 0b1000 (8) |
+// | 1 (less than)        | 0b0100 (4) |
+// | 2 (greater than)     | 0b0010 (2) |
+// | 3 (unordered)        | 0b0001 (1) |
+//
+// Note that compare-and-branch instructions must not have bit 3 (0b0001) set.
 var S390Xblocks = []blockData{
-	{name: "BRC", controls: 1}, // aux is condition code mask (s390x.CCMask)
+	// branch on condition
+	{name: "BRC", controls: 1}, // condition code value (flags) is Controls[0]

+	// compare-and-branch (register-register)
+	// - integrates comparison of Controls[0] with Controls[1]
+	// - both control values must be in general purpose registers
+	{name: "CRJ", controls: 2},   // signed 32-bit integer comparison
+	{name: "CGRJ", controls: 2},  // signed 64-bit integer comparison
+	{name: "CLRJ", controls: 2},  // unsigned 32-bit integer comparison
+	{name: "CLGRJ", controls: 2}, // unsigned 64-bit integer comparison

+	// compare-and-branch (register-immediate)
+	// - integrates comparison of Controls[0] with AuxInt
+	// - control value must be in a general purpose register
+	// - the AuxInt value is sign-extended for signed comparisons
+	//   and zero-extended for unsigned comparisons
+	{name: "CIJ", controls: 1, auxint: "Int8"},    // signed 32-bit integer comparison
+	{name: "CGIJ", controls: 1, auxint: "Int8"},   // signed 64-bit integer comparison
+	{name: "CLIJ", controls: 1, auxint: "UInt8"},  // unsigned 32-bit integer comparison
+	{name: "CLGIJ", controls: 1, auxint: "UInt8"}, // unsigned 64-bit integer comparison
 }

 archs = append(archs, arch{
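To make the mask table concrete, here is a small Go sketch, written for this note rather than taken from the CL, of how a 4-bit CCMask selects between a block's two successors:

package ccmask

// Condition code values follow the table above:
// 0 = equal, 1 = less than, 2 = greater than, 3 = unordered.
// The mask bit for value cc is 1 << (3 - cc), so 0b1000 means "equal",
// 0b0100 means "less than", and so on.
func takesFirstSuccessor(mask, cc uint8) bool {
	if cc > 3 {
		return false // only the four condition code values above are defined
	}
	return mask&(1<<(3-cc)) != 0
}

// Example: a mask of 0b1010 (equal or greater) selects the first successor
// for cc 0 and cc 2, and the second successor for cc 1 and cc 3.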
@ -70,6 +70,7 @@ type opData struct {
 type blockData struct {
 	name string // the suffix for this block ("EQ", "LT", etc.)
 	controls int // the number of control values this type of block requires
+	auxint string // the type of the AuxInt value, if any
 }

 type regInfo struct {
@ -219,6 +220,21 @@ func genOp() {
 	fmt.Fprintln(w, "}")
 	fmt.Fprintln(w, "func (k BlockKind) String() string {return blockString[k]}")

+	// generate block kind auxint method
+	fmt.Fprintln(w, "func (k BlockKind) AuxIntType() string {")
+	fmt.Fprintln(w, "switch k {")
+	for _, a := range archs {
+		for _, b := range a.blocks {
+			if b.auxint == "" {
+				continue
+			}
+			fmt.Fprintf(w, "case Block%s%s: return \"%s\"\n", a.Name(), b.name, b.auxint)
+		}
+	}
+	fmt.Fprintln(w, "}")
+	fmt.Fprintln(w, "return \"\"")
+	fmt.Fprintln(w, "}")

 	// generate Op* declarations
 	fmt.Fprintln(w, "const (")
 	fmt.Fprintln(w, "OpInvalid Op = iota") // make sure OpInvalid is 0.
@ -749,7 +749,7 @@ func breakf(format string, a ...interface{}) *CondBreak {
 func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 	rr := &RuleRewrite{loc: rule.loc}
 	rr.match, rr.cond, rr.result = rule.parse()
-	_, _, _, aux, s := extract(rr.match) // remove parens, then split
+	_, _, auxint, aux, s := extract(rr.match) // remove parens, then split

 	// check match of control values
 	if len(s) < data.controls {
@ -781,15 +781,28 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 			pos[i] = arg + ".Pos"
 		}
 	}
-	if aux != "" {
-		rr.add(declf(aux, "b.Aux"))
+	for _, e := range []struct {
+		name, field string
+	}{
+		{auxint, "AuxInt"},
+		{aux, "Aux"},
+	} {
+		if e.name == "" {
+			continue
+		}
+		if !token.IsIdentifier(e.name) || rr.declared(e.name) {
+			// code or variable
+			rr.add(breakf("b.%s != %s", e.field, e.name))
+		} else {
+			rr.add(declf(e.name, "b.%s", e.field))
+		}
 	}
 	if rr.cond != "" {
 		rr.add(breakf("!(%s)", rr.cond))
 	}

 	// Rule matches. Generate result.
-	outop, _, _, aux, t := extract(rr.result) // remove parens, then split
+	outop, _, auxint, aux, t := extract(rr.result) // remove parens, then split
 	_, outdata := getBlockInfo(outop, arch)
 	if len(t) < outdata.controls {
 		log.Fatalf("incorrect number of output arguments in %s, got %v wanted at least %v", rule, len(s), outdata.controls)
@ -816,8 +829,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 	}

 	blockName, _ := getBlockInfo(outop, arch)
-	rr.add(stmtf("b.Kind = %s", blockName))
-	rr.add(stmtf("b.ResetControls()"))
+	rr.add(stmtf("b.Reset(%s)", blockName))
 	for i, control := range t[:outdata.controls] {
 		// Select a source position for any new control values.
 		// TODO: does it always make sense to use the source position
@ -833,10 +845,11 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
 		v := genResult0(rr, arch, control, false, false, newpos)
 		rr.add(stmtf("b.AddControl(%s)", v))
 	}
+	if auxint != "" {
+		rr.add(stmtf("b.AuxInt = %s", auxint))
+	}
 	if aux != "" {
 		rr.add(stmtf("b.Aux = %s", aux))
-	} else {
-		rr.add(stmtf("b.Aux = nil"))
 	}

 	succChanged := false
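A compact Go sketch, written for this note and not part of the CL, of the decision the generator now applies to a block's Aux and AuxInt patterns: literals and already-bound names become guards, while fresh identifiers become declarations.

package rulegensketch

import "go/token"

// auxMatchStmt mirrors genBlockRewrite's new Aux/AuxInt handling: given the
// pattern text and the field name ("Aux" or "AuxInt"), it returns the kind of
// statement the generator would emit. declared reports whether the name is
// already bound earlier in the rule.
func auxMatchStmt(name, field string, declared func(string) bool) string {
	if name == "" {
		return "" // nothing to match
	}
	if !token.IsIdentifier(name) || declared(name) {
		// A literal like 0 or an expression: emit a guard that aborts the match.
		return "if b." + field + " != " + name + " { break }"
	}
	// A fresh variable: bind it to the block's field.
	return name + " := b." + field
}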
@ -1,83 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"testing"
-)
-
-func TestLoopConditionS390X(t *testing.T) {
-	// Test that a simple loop condition does not generate a conditional
-	// move (issue #19227).
-	//
-	// MOVDLT is generated when Less64 is lowered but should be
-	// optimized into an LT branch.
-	//
-	// For example, compiling the following loop:
-	//
-	// for i := 0; i < N; i++ {
-	// sum += 3
-	// }
-	//
-	// should generate assembly similar to:
-	// loop:
-	// CMP R0, R1
-	// BGE done
-	// ADD $3, R4
-	// ADD $1, R1
-	// BR loop
-	// done:
-	//
-	// rather than:
-	// loop:
-	// MOVD $0, R2
-	// MOVD $1, R3
-	// CMP R0, R1
-	// LOCGR $(8+2) R2, R3
-	// CMPW R2, $0
-	// BNE done
-	// ADD $3, R4
-	// ADD $1, R1
-	// BR loop
-	// done:
-	//
-	c := testConfigS390X(t)
-	a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8)
-	fun := c.Fun("entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
-			Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil),
-			Valu("ret", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "SP", "mem"),
-			Valu("N", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
-			Valu("starti", OpConst64, c.config.Types.Int64, 0, nil),
-			Valu("startsum", OpConst64, c.config.Types.Int64, 0, nil),
-			Goto("b1")),
-		Bloc("b1",
-			Valu("phii", OpPhi, c.config.Types.Int64, 0, nil, "starti", "i"),
-			Valu("phisum", OpPhi, c.config.Types.Int64, 0, nil, "startsum", "sum"),
-			Valu("cmp1", OpLess64, c.config.Types.Bool, 0, nil, "phii", "N"),
-			If("cmp1", "b2", "b3")),
-		Bloc("b2",
-			Valu("c1", OpConst64, c.config.Types.Int64, 1, nil),
-			Valu("i", OpAdd64, c.config.Types.Int64, 0, nil, "phii", "c1"),
-			Valu("c3", OpConst64, c.config.Types.Int64, 3, nil),
-			Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "phisum", "c3"),
-			Goto("b1")),
-		Bloc("b3",
-			Valu("retdef", OpVarDef, types.TypeMem, 0, a, "mem"),
-			Valu("store", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ret", "phisum", "retdef"),
-			Exit("store")))
-	CheckFunc(fun.f)
-	Compile(fun.f)
-	CheckFunc(fun.f)
-
-	checkOpcodeCounts(t, fun.f, map[Op]int{
-		OpS390XLOCGR: 0,
-		OpS390XCMP: 1,
-		OpS390XCMPWconst: 0,
-	})
-}
@ -15,7 +15,7 @@ func isPoorStatementOp(op Op) bool {
 	switch op {
 	// Note that Nilcheck often vanishes, but when it doesn't, you'd love to start the statement there
 	// so that a debugger-user sees the stop before the panic, and can examine the value.
-	case OpAddr, OpLocalAddr, OpOffPtr, OpStructSelect, OpPhi,
+	case OpAddr, OpLocalAddr, OpOffPtr, OpStructSelect, OpPhi, OpITab, OpIData,
 		OpIMake, OpStringMake, OpSliceMake, OpStructMake0, OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4,
 		OpConstBool, OpConst8, OpConst16, OpConst32, OpConst64, OpConst32F, OpConst64F:
 		return true
@ -112,6 +112,14 @@ const (
 	BlockPPC64FGE

 	BlockS390XBRC
+	BlockS390XCRJ
+	BlockS390XCGRJ
+	BlockS390XCLRJ
+	BlockS390XCLGRJ
+	BlockS390XCIJ
+	BlockS390XCGIJ
+	BlockS390XCLIJ
+	BlockS390XCLGIJ

 	BlockPlain
 	BlockIf
@ -220,7 +228,15 @@ var blockString = [...]string{
 	BlockPPC64FGT: "FGT",
 	BlockPPC64FGE: "FGE",

 	BlockS390XBRC: "BRC",
+	BlockS390XCRJ: "CRJ",
+	BlockS390XCGRJ: "CGRJ",
+	BlockS390XCLRJ: "CLRJ",
+	BlockS390XCLGRJ: "CLGRJ",
+	BlockS390XCIJ: "CIJ",
+	BlockS390XCGIJ: "CGIJ",
+	BlockS390XCLIJ: "CLIJ",
+	BlockS390XCLGIJ: "CLGIJ",

 	BlockPlain: "Plain",
 	BlockIf: "If",
@ -232,6 +248,19 @@ var blockString = [...]string{
 }

 func (k BlockKind) String() string { return blockString[k] }
+func (k BlockKind) AuxIntType() string {
+	switch k {
+	case BlockS390XCIJ:
+		return "Int8"
+	case BlockS390XCGIJ:
+		return "Int8"
+	case BlockS390XCLIJ:
+		return "UInt8"
+	case BlockS390XCLGIJ:
+		return "UInt8"
+	}
+	return ""
+}

 const (
 	OpInvalid Op = iota
@ -24461,7 +24490,7 @@ var opcodeTable = [...]opInfo{
 	{0, 8}, // R3
 	{1, 16}, // R4
 	},
-	clobbers: 1944, // R3 R4 R7 R8 R9 R10
+	clobbers: 16408, // R3 R4 R14
 	},
 	},
 	{
@ -625,15 +625,6 @@ func (s *regAllocState) init(f *Func) {
 			s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
 		}
 	}
-	if s.f.Config.nacl {
-		switch s.f.Config.arch {
-		case "arm":
-			s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
-		case "amd64p32":
-			s.allocatable &^= 1 << 5 // BP - reserved for nacl
-			s.allocatable &^= 1 << 15 // R15 - reserved for nacl
-		}
-	}
 	if s.f.Config.use387 {
 		s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
 	}
@ -1328,27 +1319,25 @@ func (s *regAllocState) regalloc(f *Func) {
 				// arg0 is dead. We can clobber its register.
 				goto ok
 			}
+			if opcodeTable[v.Op].commutative && !s.liveAfterCurrentInstruction(v.Args[1]) {
+				args[0], args[1] = args[1], args[0]
+				goto ok
+			}
 			if s.values[v.Args[0].ID].rematerializeable {
 				// We can rematerialize the input, don't worry about clobbering it.
 				goto ok
 			}
+			if opcodeTable[v.Op].commutative && s.values[v.Args[1].ID].rematerializeable {
+				args[0], args[1] = args[1], args[0]
+				goto ok
+			}
 			if countRegs(s.values[v.Args[0].ID].regs) >= 2 {
 				// we have at least 2 copies of arg0. We can afford to clobber one.
 				goto ok
 			}
-			if opcodeTable[v.Op].commutative {
-				if !s.liveAfterCurrentInstruction(v.Args[1]) {
-					args[0], args[1] = args[1], args[0]
-					goto ok
-				}
-				if s.values[v.Args[1].ID].rematerializeable {
-					args[0], args[1] = args[1], args[0]
-					goto ok
-				}
-				if countRegs(s.values[v.Args[1].ID].regs) >= 2 {
-					args[0], args[1] = args[1], args[0]
-					goto ok
-				}
+			if opcodeTable[v.Op].commutative && countRegs(s.values[v.Args[1].ID].regs) >= 2 {
+				args[0], args[1] = args[1], args[0]
+				goto ok
 			}

 			// We can't overwrite arg0 (or arg1, if commutative). So we
@ -404,6 +404,16 @@ func is16Bit(n int64) bool {
 	return n == int64(int16(n))
 }

+// is8Bit reports whether n can be represented as a signed 8 bit integer.
+func is8Bit(n int64) bool {
+	return n == int64(int8(n))
+}
+
+// isU8Bit reports whether n can be represented as an unsigned 8 bit integer.
+func isU8Bit(n int64) bool {
+	return n == int64(uint8(n))
+}
+
 // isU12Bit reports whether n can be represented as an unsigned 12 bit integer.
 func isU12Bit(n int64) bool {
 	return 0 <= n && n < (1<<12)
@ -1051,7 +1061,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
 	// lowers them, so we only perform this optimization on platforms that we know to
 	// have fast Move ops.
 	switch c.arch {
-	case "amd64", "amd64p32":
+	case "amd64":
 		return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz))
 	case "386", "ppc64", "ppc64le", "arm64":
 		return sz <= 8
@ -1067,7 +1077,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
 // for sizes < 32-bit. This is used to decide whether to promote some rotations.
 func hasSmallRotate(c *Config) bool {
 	switch c.arch {
-	case "amd64", "amd64p32", "386":
+	case "amd64", "386":
 		return true
 	default:
 		return false
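A quick illustration of the boundary behaviour of the new range helpers; the two functions are copied from the hunk above and the printed values follow directly from Go's integer conversions:

package main

import "fmt"

func is8Bit(n int64) bool  { return n == int64(int8(n)) }
func isU8Bit(n int64) bool { return n == int64(uint8(n)) }

func main() {
	// Signed 8-bit range is [-128, 127].
	fmt.Println(is8Bit(127), is8Bit(128), is8Bit(-128), is8Bit(-129)) // true false true false
	// Unsigned 8-bit range is [0, 255].
	fmt.Println(isU8Bit(255), isU8Bit(256), isU8Bit(-1)) // true false false
}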
Four file diffs suppressed because they are too large.
@ -9002,10 +9002,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
for b.Controls[0].Op == OpMIPSFPFlagTrue {
|
for b.Controls[0].Op == OpMIPSFPFlagTrue {
|
||||||
v_0 := b.Controls[0]
|
v_0 := b.Controls[0]
|
||||||
cmp := v_0.Args[0]
|
cmp := v_0.Args[0]
|
||||||
b.Kind = BlockMIPSFPF
|
b.Reset(BlockMIPSFPF)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (FPFlagFalse cmp) yes no)
|
// match: (EQ (FPFlagFalse cmp) yes no)
|
||||||
|
|
@ -9013,10 +9011,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
for b.Controls[0].Op == OpMIPSFPFlagFalse {
|
for b.Controls[0].Op == OpMIPSFPFlagFalse {
|
||||||
v_0 := b.Controls[0]
|
v_0 := b.Controls[0]
|
||||||
cmp := v_0.Args[0]
|
cmp := v_0.Args[0]
|
||||||
b.Kind = BlockMIPSFPT
|
b.Reset(BlockMIPSFPT)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
|
// match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
|
||||||
|
|
@ -9031,10 +9027,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
_ = cmp.Args[1]
|
_ = cmp.Args[1]
|
||||||
b.Kind = BlockMIPSNE
|
b.Reset(BlockMIPSNE)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
|
// match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
|
||||||
|
|
@ -9049,10 +9043,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
_ = cmp.Args[1]
|
_ = cmp.Args[1]
|
||||||
b.Kind = BlockMIPSNE
|
b.Reset(BlockMIPSNE)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
|
// match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
|
||||||
|
|
@ -9066,10 +9058,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if cmp.Op != OpMIPSSGTconst {
|
if cmp.Op != OpMIPSSGTconst {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockMIPSNE
|
b.Reset(BlockMIPSNE)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
|
// match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
|
||||||
|
|
@ -9083,10 +9073,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if cmp.Op != OpMIPSSGTUconst {
|
if cmp.Op != OpMIPSSGTUconst {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockMIPSNE
|
b.Reset(BlockMIPSNE)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no)
|
// match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no)
|
||||||
|
|
@ -9100,10 +9088,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if cmp.Op != OpMIPSSGTzero {
|
if cmp.Op != OpMIPSSGTzero {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockMIPSNE
|
b.Reset(BlockMIPSNE)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no)
|
// match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no)
|
||||||
|
|
@ -9117,10 +9103,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if cmp.Op != OpMIPSSGTUzero {
|
if cmp.Op != OpMIPSSGTUzero {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockMIPSNE
|
b.Reset(BlockMIPSNE)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (SGTUconst [1] x) yes no)
|
// match: (EQ (SGTUconst [1] x) yes no)
|
||||||
|
|
@ -9131,10 +9115,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
x := v_0.Args[0]
|
x := v_0.Args[0]
|
||||||
b.Kind = BlockMIPSNE
|
b.Reset(BlockMIPSNE)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(x)
|
b.AddControl(x)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (SGTUzero x) yes no)
|
// match: (EQ (SGTUzero x) yes no)
|
||||||
|
|
@ -9142,10 +9124,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
for b.Controls[0].Op == OpMIPSSGTUzero {
|
for b.Controls[0].Op == OpMIPSSGTUzero {
|
||||||
v_0 := b.Controls[0]
|
v_0 := b.Controls[0]
|
||||||
x := v_0.Args[0]
|
x := v_0.Args[0]
|
||||||
b.Kind = BlockMIPSEQ
|
b.Reset(BlockMIPSEQ)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(x)
|
b.AddControl(x)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (SGTconst [0] x) yes no)
|
// match: (EQ (SGTconst [0] x) yes no)
|
||||||
|
|
@ -9156,10 +9136,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
x := v_0.Args[0]
|
x := v_0.Args[0]
|
||||||
b.Kind = BlockMIPSGEZ
|
b.Reset(BlockMIPSGEZ)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(x)
|
b.AddControl(x)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (SGTzero x) yes no)
|
// match: (EQ (SGTzero x) yes no)
|
||||||
|
|
@ -9167,10 +9145,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
for b.Controls[0].Op == OpMIPSSGTzero {
|
for b.Controls[0].Op == OpMIPSSGTzero {
|
||||||
v_0 := b.Controls[0]
|
v_0 := b.Controls[0]
|
||||||
x := v_0.Args[0]
|
x := v_0.Args[0]
|
||||||
b.Kind = BlockMIPSLEZ
|
b.Reset(BlockMIPSLEZ)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(x)
|
b.AddControl(x)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (MOVWconst [0]) yes no)
|
// match: (EQ (MOVWconst [0]) yes no)
|
||||||
|
|
@ -9180,9 +9156,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if v_0.AuxInt != 0 {
|
if v_0.AuxInt != 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (EQ (MOVWconst [c]) yes no)
|
// match: (EQ (MOVWconst [c]) yes no)
|
||||||
|
|
@ -9194,9 +9168,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(c != 0) {
|
if !(c != 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
b.swapSuccessors()
|
b.swapSuccessors()
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
@ -9210,9 +9182,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(int32(c) >= 0) {
|
if !(int32(c) >= 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (GEZ (MOVWconst [c]) yes no)
|
// match: (GEZ (MOVWconst [c]) yes no)
|
||||||
|
|
@ -9224,9 +9194,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(int32(c) < 0) {
|
if !(int32(c) < 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
b.swapSuccessors()
|
b.swapSuccessors()
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
@ -9240,9 +9208,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(int32(c) > 0) {
|
if !(int32(c) > 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (GTZ (MOVWconst [c]) yes no)
|
// match: (GTZ (MOVWconst [c]) yes no)
|
||||||
|
|
@ -9254,9 +9220,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(int32(c) <= 0) {
|
if !(int32(c) <= 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
b.swapSuccessors()
|
b.swapSuccessors()
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
@ -9265,10 +9229,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
// result: (NE cond yes no)
|
// result: (NE cond yes no)
|
||||||
for {
|
for {
|
||||||
cond := b.Controls[0]
|
cond := b.Controls[0]
|
||||||
b.Kind = BlockMIPSNE
|
b.Reset(BlockMIPSNE)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cond)
|
b.AddControl(cond)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
case BlockMIPSLEZ:
|
case BlockMIPSLEZ:
|
||||||
|
|
@ -9281,9 +9243,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(int32(c) <= 0) {
|
if !(int32(c) <= 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (LEZ (MOVWconst [c]) yes no)
|
// match: (LEZ (MOVWconst [c]) yes no)
|
||||||
|
|
@ -9295,9 +9255,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(int32(c) > 0) {
|
if !(int32(c) > 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
b.swapSuccessors()
|
b.swapSuccessors()
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
@ -9311,9 +9269,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(int32(c) < 0) {
|
if !(int32(c) < 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (LTZ (MOVWconst [c]) yes no)
|
// match: (LTZ (MOVWconst [c]) yes no)
|
||||||
|
|
@ -9325,9 +9281,7 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if !(int32(c) >= 0) {
|
if !(int32(c) >= 0) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockFirst
|
b.Reset(BlockFirst)
|
||||||
b.ResetControls()
|
|
||||||
b.Aux = nil
|
|
||||||
b.swapSuccessors()
|
b.swapSuccessors()
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
@ -9337,10 +9291,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
for b.Controls[0].Op == OpMIPSFPFlagTrue {
|
for b.Controls[0].Op == OpMIPSFPFlagTrue {
|
||||||
v_0 := b.Controls[0]
|
v_0 := b.Controls[0]
|
||||||
cmp := v_0.Args[0]
|
cmp := v_0.Args[0]
|
||||||
b.Kind = BlockMIPSFPT
|
b.Reset(BlockMIPSFPT)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (NE (FPFlagFalse cmp) yes no)
|
// match: (NE (FPFlagFalse cmp) yes no)
|
||||||
|
|
@ -9348,10 +9300,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
for b.Controls[0].Op == OpMIPSFPFlagFalse {
|
for b.Controls[0].Op == OpMIPSFPFlagFalse {
|
||||||
v_0 := b.Controls[0]
|
v_0 := b.Controls[0]
|
||||||
cmp := v_0.Args[0]
|
cmp := v_0.Args[0]
|
||||||
b.Kind = BlockMIPSFPF
|
b.Reset(BlockMIPSFPF)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
|
// match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
|
||||||
|
|
@ -9366,10 +9316,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
_ = cmp.Args[1]
|
_ = cmp.Args[1]
|
||||||
b.Kind = BlockMIPSEQ
|
b.Reset(BlockMIPSEQ)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
|
// match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
|
||||||
|
|
@ -9384,10 +9332,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
_ = cmp.Args[1]
|
_ = cmp.Args[1]
|
||||||
b.Kind = BlockMIPSEQ
|
b.Reset(BlockMIPSEQ)
|
||||||
b.ResetControls()
|
|
||||||
b.AddControl(cmp)
|
b.AddControl(cmp)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
|
// match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
|
||||||
|
|
@ -9401,10 +9347,8 @@ func rewriteBlockMIPS(b *Block) bool {
|
||||||
if cmp.Op != OpMIPSSGTconst {
|
if cmp.Op != OpMIPSSGTconst {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockMIPSEQ
|
b.Reset(BlockMIPSEQ)
|
||||||
b.ResetControls()
|
|
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
@@ -9418,10 +9362,8 @@ func rewriteBlockMIPS(b *Block) bool {
 			if cmp.Op != OpMIPSSGTUconst {
 				break
 			}
-			b.Kind = BlockMIPSEQ
-			b.ResetControls()
+			b.Reset(BlockMIPSEQ)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (XORconst [1] cmp:(SGTzero _)) yes no)
@@ -9435,10 +9377,8 @@ func rewriteBlockMIPS(b *Block) bool {
 			if cmp.Op != OpMIPSSGTzero {
 				break
 			}
-			b.Kind = BlockMIPSEQ
-			b.ResetControls()
+			b.Reset(BlockMIPSEQ)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no)
@@ -9452,10 +9392,8 @@ func rewriteBlockMIPS(b *Block) bool {
 			if cmp.Op != OpMIPSSGTUzero {
 				break
 			}
-			b.Kind = BlockMIPSEQ
-			b.ResetControls()
+			b.Reset(BlockMIPSEQ)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (SGTUconst [1] x) yes no)
@@ -9466,10 +9404,8 @@ func rewriteBlockMIPS(b *Block) bool {
 				break
 			}
 			x := v_0.Args[0]
-			b.Kind = BlockMIPSEQ
-			b.ResetControls()
+			b.Reset(BlockMIPSEQ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (SGTUzero x) yes no)
@@ -9477,10 +9413,8 @@ func rewriteBlockMIPS(b *Block) bool {
 		for b.Controls[0].Op == OpMIPSSGTUzero {
 			v_0 := b.Controls[0]
 			x := v_0.Args[0]
-			b.Kind = BlockMIPSNE
-			b.ResetControls()
+			b.Reset(BlockMIPSNE)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (SGTconst [0] x) yes no)
@@ -9491,10 +9425,8 @@ func rewriteBlockMIPS(b *Block) bool {
 				break
 			}
 			x := v_0.Args[0]
-			b.Kind = BlockMIPSLTZ
-			b.ResetControls()
+			b.Reset(BlockMIPSLTZ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (SGTzero x) yes no)
@@ -9502,10 +9434,8 @@ func rewriteBlockMIPS(b *Block) bool {
 		for b.Controls[0].Op == OpMIPSSGTzero {
 			v_0 := b.Controls[0]
 			x := v_0.Args[0]
-			b.Kind = BlockMIPSGTZ
-			b.ResetControls()
+			b.Reset(BlockMIPSGTZ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (MOVWconst [0]) yes no)
@@ -9515,9 +9445,7 @@ func rewriteBlockMIPS(b *Block) bool {
 			if v_0.AuxInt != 0 {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -9530,9 +9458,7 @@ func rewriteBlockMIPS(b *Block) bool {
 			if !(c != 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 	}
@@ -9747,10 +9747,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 		for b.Controls[0].Op == OpMIPS64FPFlagTrue {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockMIPS64FPF
-			b.ResetControls()
+			b.Reset(BlockMIPS64FPF)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (FPFlagFalse cmp) yes no)
@@ -9758,10 +9756,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 		for b.Controls[0].Op == OpMIPS64FPFlagFalse {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockMIPS64FPT
-			b.ResetControls()
+			b.Reset(BlockMIPS64FPT)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
@@ -9776,10 +9772,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 				break
 			}
 			_ = cmp.Args[1]
-			b.Kind = BlockMIPS64NE
-			b.ResetControls()
+			b.Reset(BlockMIPS64NE)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
@@ -9794,10 +9788,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 				break
 			}
 			_ = cmp.Args[1]
-			b.Kind = BlockMIPS64NE
-			b.ResetControls()
+			b.Reset(BlockMIPS64NE)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
@@ -9811,10 +9803,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if cmp.Op != OpMIPS64SGTconst {
 				break
 			}
-			b.Kind = BlockMIPS64NE
-			b.ResetControls()
+			b.Reset(BlockMIPS64NE)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
@@ -9828,10 +9818,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if cmp.Op != OpMIPS64SGTUconst {
 				break
 			}
-			b.Kind = BlockMIPS64NE
-			b.ResetControls()
+			b.Reset(BlockMIPS64NE)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (SGTUconst [1] x) yes no)
@@ -9842,10 +9830,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 				break
 			}
 			x := v_0.Args[0]
-			b.Kind = BlockMIPS64NE
-			b.ResetControls()
+			b.Reset(BlockMIPS64NE)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (SGTU x (MOVVconst [0])) yes no)
@@ -9858,10 +9844,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
 				break
 			}
-			b.Kind = BlockMIPS64EQ
-			b.ResetControls()
+			b.Reset(BlockMIPS64EQ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (SGTconst [0] x) yes no)
@@ -9872,10 +9856,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 				break
 			}
 			x := v_0.Args[0]
-			b.Kind = BlockMIPS64GEZ
-			b.ResetControls()
+			b.Reset(BlockMIPS64GEZ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (SGT x (MOVVconst [0])) yes no)
@@ -9888,10 +9870,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
 				break
 			}
-			b.Kind = BlockMIPS64LEZ
-			b.ResetControls()
+			b.Reset(BlockMIPS64LEZ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (MOVVconst [0]) yes no)
@@ -9901,9 +9881,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if v_0.AuxInt != 0 {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (EQ (MOVVconst [c]) yes no)
@@ -9915,9 +9893,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c != 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -9931,9 +9907,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c >= 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (GEZ (MOVVconst [c]) yes no)
@@ -9945,9 +9919,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c < 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -9961,9 +9933,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c > 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (GTZ (MOVVconst [c]) yes no)
@@ -9975,9 +9945,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c <= 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -9986,10 +9954,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 		// result: (NE cond yes no)
 		for {
 			cond := b.Controls[0]
-			b.Kind = BlockMIPS64NE
-			b.ResetControls()
+			b.Reset(BlockMIPS64NE)
 			b.AddControl(cond)
-			b.Aux = nil
 			return true
 		}
 	case BlockMIPS64LEZ:
@@ -10002,9 +9968,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c <= 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (LEZ (MOVVconst [c]) yes no)
@@ -10016,9 +9980,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c > 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -10032,9 +9994,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c < 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (LTZ (MOVVconst [c]) yes no)
@@ -10046,9 +10006,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c >= 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -10058,10 +10016,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 		for b.Controls[0].Op == OpMIPS64FPFlagTrue {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockMIPS64FPT
-			b.ResetControls()
+			b.Reset(BlockMIPS64FPT)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (FPFlagFalse cmp) yes no)
@@ -10069,10 +10025,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 		for b.Controls[0].Op == OpMIPS64FPFlagFalse {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockMIPS64FPF
-			b.ResetControls()
+			b.Reset(BlockMIPS64FPF)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
@@ -10087,10 +10041,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 				break
 			}
 			_ = cmp.Args[1]
-			b.Kind = BlockMIPS64EQ
-			b.ResetControls()
+			b.Reset(BlockMIPS64EQ)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
@@ -10105,10 +10057,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 				break
 			}
 			_ = cmp.Args[1]
-			b.Kind = BlockMIPS64EQ
-			b.ResetControls()
+			b.Reset(BlockMIPS64EQ)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
@@ -10122,10 +10072,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if cmp.Op != OpMIPS64SGTconst {
 				break
 			}
-			b.Kind = BlockMIPS64EQ
-			b.ResetControls()
+			b.Reset(BlockMIPS64EQ)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
@@ -10139,10 +10087,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if cmp.Op != OpMIPS64SGTUconst {
 				break
 			}
-			b.Kind = BlockMIPS64EQ
-			b.ResetControls()
+			b.Reset(BlockMIPS64EQ)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (SGTUconst [1] x) yes no)
@@ -10153,10 +10099,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 				break
 			}
 			x := v_0.Args[0]
-			b.Kind = BlockMIPS64EQ
-			b.ResetControls()
+			b.Reset(BlockMIPS64EQ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (SGTU x (MOVVconst [0])) yes no)
@@ -10169,10 +10113,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
 				break
 			}
-			b.Kind = BlockMIPS64NE
-			b.ResetControls()
+			b.Reset(BlockMIPS64NE)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (SGTconst [0] x) yes no)
@@ -10183,10 +10125,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 				break
 			}
 			x := v_0.Args[0]
-			b.Kind = BlockMIPS64LTZ
-			b.ResetControls()
+			b.Reset(BlockMIPS64LTZ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (SGT x (MOVVconst [0])) yes no)
@@ -10199,10 +10139,8 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 {
 				break
 			}
-			b.Kind = BlockMIPS64GTZ
-			b.ResetControls()
+			b.Reset(BlockMIPS64GTZ)
 			b.AddControl(x)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (MOVVconst [0]) yes no)
@@ -10212,9 +10150,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if v_0.AuxInt != 0 {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -10227,9 +10163,7 @@ func rewriteBlockMIPS64(b *Block) bool {
 			if !(c != 0) {
 				break
 			}
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 	}
@@ -26481,13 +26481,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no)
@@ -26503,38 +26501,30 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (FlagEQ) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagEQ {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (EQ (FlagLT) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagLT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
 		// match: (EQ (FlagGT) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagGT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -26543,10 +26533,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64InvertFlags {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no)
@@ -26562,13 +26550,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no)
@@ -26584,13 +26570,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (CMPconst [0] z:(AND x y)) yes no)
@@ -26610,13 +26594,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (CMPconst [0] z:(OR x y)) yes no)
@@ -26636,13 +26618,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (EQ (CMPconst [0] z:(XOR x y)) yes no)
@@ -26662,39 +26642,31 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 	case BlockPPC64GE:
 		// match: (GE (FlagEQ) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagEQ {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (GE (FlagLT) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagLT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
 		// match: (GE (FlagGT) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagGT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (GE (InvertFlags cmp) yes no)
@@ -26702,10 +26674,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64InvertFlags {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockPPC64LE
-			b.ResetControls()
+			b.Reset(BlockPPC64LE)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (GE (CMPconst [0] (ANDconst [c] x)) yes no)
@@ -26721,13 +26691,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64GE
-			b.ResetControls()
+			b.Reset(BlockPPC64GE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (GE (CMPWconst [0] (ANDconst [c] x)) yes no)
@@ -26743,13 +26711,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64GE
-			b.ResetControls()
+			b.Reset(BlockPPC64GE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (GE (CMPconst [0] z:(AND x y)) yes no)
@@ -26769,13 +26735,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64GE
-			b.ResetControls()
+			b.Reset(BlockPPC64GE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (GE (CMPconst [0] z:(OR x y)) yes no)
@@ -26795,13 +26759,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64GE
-			b.ResetControls()
+			b.Reset(BlockPPC64GE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (GE (CMPconst [0] z:(XOR x y)) yes no)
@@ -26821,40 +26783,32 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64GE
-			b.ResetControls()
+			b.Reset(BlockPPC64GE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 	case BlockPPC64GT:
 		// match: (GT (FlagEQ) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagEQ {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
 		// match: (GT (FlagLT) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagLT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
 		// match: (GT (FlagGT) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagGT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (GT (InvertFlags cmp) yes no)
@@ -26862,10 +26816,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64InvertFlags {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockPPC64LT
-			b.ResetControls()
+			b.Reset(BlockPPC64LT)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (GT (CMPconst [0] (ANDconst [c] x)) yes no)
@@ -26881,13 +26833,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64GT
-			b.ResetControls()
+			b.Reset(BlockPPC64GT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (GT (CMPWconst [0] (ANDconst [c] x)) yes no)
@@ -26903,13 +26853,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64GT
-			b.ResetControls()
+			b.Reset(BlockPPC64GT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (GT (CMPconst [0] z:(AND x y)) yes no)
@@ -26929,13 +26877,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64GT
-			b.ResetControls()
+			b.Reset(BlockPPC64GT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (GT (CMPconst [0] z:(OR x y)) yes no)
@@ -26955,13 +26901,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64GT
-			b.ResetControls()
+			b.Reset(BlockPPC64GT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (GT (CMPconst [0] z:(XOR x y)) yes no)
@@ -26981,13 +26925,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64GT
-			b.ResetControls()
+			b.Reset(BlockPPC64GT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 	case BlockIf:
@@ -26996,10 +26938,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64Equal {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (NotEqual cc) yes no)
@@ -27007,10 +26947,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64NotEqual {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (LessThan cc) yes no)
@@ -27018,10 +26956,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64LessThan {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64LT
-			b.ResetControls()
+			b.Reset(BlockPPC64LT)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (LessEqual cc) yes no)
@@ -27029,10 +26965,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64LessEqual {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64LE
-			b.ResetControls()
+			b.Reset(BlockPPC64LE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (GreaterThan cc) yes no)
@@ -27040,10 +26974,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64GreaterThan {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64GT
-			b.ResetControls()
+			b.Reset(BlockPPC64GT)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (GreaterEqual cc) yes no)
@@ -27051,10 +26983,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64GreaterEqual {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64GE
-			b.ResetControls()
+			b.Reset(BlockPPC64GE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (FLessThan cc) yes no)
@@ -27062,10 +26992,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64FLessThan {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64FLT
-			b.ResetControls()
+			b.Reset(BlockPPC64FLT)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (FLessEqual cc) yes no)
@@ -27073,10 +27001,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64FLessEqual {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64FLE
-			b.ResetControls()
+			b.Reset(BlockPPC64FLE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (FGreaterThan cc) yes no)
@@ -27084,10 +27010,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64FGreaterThan {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64FGT
-			b.ResetControls()
+			b.Reset(BlockPPC64FGT)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If (FGreaterEqual cc) yes no)
@@ -27095,48 +27019,38 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64FGreaterEqual {
 			v_0 := b.Controls[0]
 			cc := v_0.Args[0]
-			b.Kind = BlockPPC64FGE
-			b.ResetControls()
+			b.Reset(BlockPPC64FGE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (If cond yes no)
 		// result: (NE (CMPWconst [0] cond) yes no)
 		for {
 			cond := b.Controls[0]
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags)
 			v0.AuxInt = 0
 			v0.AddArg(cond)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 	case BlockPPC64LE:
 		// match: (LE (FlagEQ) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagEQ {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (LE (FlagLT) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagLT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (LE (FlagGT) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagGT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -27145,10 +27059,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64InvertFlags {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockPPC64GE
-			b.ResetControls()
+			b.Reset(BlockPPC64GE)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (LE (CMPconst [0] (ANDconst [c] x)) yes no)
@@ -27164,13 +27076,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64LE
-			b.ResetControls()
+			b.Reset(BlockPPC64LE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (LE (CMPWconst [0] (ANDconst [c] x)) yes no)
@@ -27186,13 +27096,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64LE
-			b.ResetControls()
+			b.Reset(BlockPPC64LE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (LE (CMPconst [0] z:(AND x y)) yes no)
@@ -27212,13 +27120,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64LE
-			b.ResetControls()
+			b.Reset(BlockPPC64LE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (LE (CMPconst [0] z:(OR x y)) yes no)
@@ -27238,13 +27144,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64LE
-			b.ResetControls()
+			b.Reset(BlockPPC64LE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (LE (CMPconst [0] z:(XOR x y)) yes no)
@@ -27264,39 +27168,31 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64LE
-			b.ResetControls()
+			b.Reset(BlockPPC64LE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 	case BlockPPC64LT:
 		// match: (LT (FlagEQ) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagEQ {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
 		// match: (LT (FlagLT) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagLT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (LT (FlagGT) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagGT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
@@ -27305,10 +27201,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64InvertFlags {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockPPC64GT
-			b.ResetControls()
+			b.Reset(BlockPPC64GT)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (LT (CMPconst [0] (ANDconst [c] x)) yes no)
@@ -27324,13 +27218,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64LT
-			b.ResetControls()
+			b.Reset(BlockPPC64LT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (LT (CMPWconst [0] (ANDconst [c] x)) yes no)
@@ -27346,13 +27238,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64LT
-			b.ResetControls()
+			b.Reset(BlockPPC64LT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (LT (CMPconst [0] z:(AND x y)) yes no)
@@ -27372,13 +27262,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64LT
-			b.ResetControls()
+			b.Reset(BlockPPC64LT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (LT (CMPconst [0] z:(OR x y)) yes no)
@@ -27398,13 +27286,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64LT
-			b.ResetControls()
+			b.Reset(BlockPPC64LT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (LT (CMPconst [0] z:(XOR x y)) yes no)
@@ -27424,13 +27310,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64LT
-			b.ResetControls()
+			b.Reset(BlockPPC64LT)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 	case BlockPPC64NE:
@@ -27446,10 +27330,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64EQ
-			b.ResetControls()
+			b.Reset(BlockPPC64EQ)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (NotEqual cc)) yes no)
@@ -27464,10 +27346,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (LessThan cc)) yes no)
@@ -27482,10 +27362,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64LT
-			b.ResetControls()
+			b.Reset(BlockPPC64LT)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (LessEqual cc)) yes no)
@@ -27500,10 +27378,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64LE
-			b.ResetControls()
+			b.Reset(BlockPPC64LE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (GreaterThan cc)) yes no)
@@ -27518,10 +27394,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64GT
-			b.ResetControls()
+			b.Reset(BlockPPC64GT)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (GreaterEqual cc)) yes no)
@@ -27536,10 +27410,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64GE
-			b.ResetControls()
+			b.Reset(BlockPPC64GE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (FLessThan cc)) yes no)
@@ -27554,10 +27426,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64FLT
-			b.ResetControls()
+			b.Reset(BlockPPC64FLT)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (FLessEqual cc)) yes no)
@@ -27572,10 +27442,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64FLE
-			b.ResetControls()
+			b.Reset(BlockPPC64FLE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (FGreaterThan cc)) yes no)
@@ -27590,10 +27458,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64FGT
-			b.ResetControls()
+			b.Reset(BlockPPC64FGT)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (FGreaterEqual cc)) yes no)
@@ -27608,10 +27474,8 @@ func rewriteBlockPPC64(b *Block) bool {
 				break
 			}
 			cc := v_0_0.Args[0]
-			b.Kind = BlockPPC64FGE
-			b.ResetControls()
+			b.Reset(BlockPPC64FGE)
 			b.AddControl(cc)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPconst [0] (ANDconst [c] x)) yes no)
@@ -27627,13 +27491,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no)
@@ -27649,38 +27511,30 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (FlagEQ) yes no)
 		// result: (First no yes)
 		for b.Controls[0].Op == OpPPC64FlagEQ {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			b.swapSuccessors()
 			return true
 		}
 		// match: (NE (FlagLT) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagLT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (NE (FlagGT) yes no)
 		// result: (First yes no)
 		for b.Controls[0].Op == OpPPC64FlagGT {
-			b.Kind = BlockFirst
-			b.ResetControls()
-			b.Aux = nil
+			b.Reset(BlockFirst)
 			return true
 		}
 		// match: (NE (InvertFlags cmp) yes no)
@@ -27688,10 +27542,8 @@ func rewriteBlockPPC64(b *Block) bool {
 		for b.Controls[0].Op == OpPPC64InvertFlags {
 			v_0 := b.Controls[0]
 			cmp := v_0.Args[0]
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			b.AddControl(cmp)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPconst [0] (ANDconst [c] x)) yes no)
@@ -27707,13 +27559,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no)
@@ -27729,13 +27579,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			}
 			c := v_0_0.AuxInt
 			x := v_0_0.Args[0]
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
 			v0.AuxInt = c
 			v0.AddArg(x)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPconst [0] z:(AND x y)) yes no)
@@ -27755,13 +27603,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64NE
-			b.ResetControls()
+			b.Reset(BlockPPC64NE)
 			v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
 			v0.AddArg(x)
 			v0.AddArg(y)
 			b.AddControl(v0)
-			b.Aux = nil
 			return true
 		}
 		// match: (NE (CMPconst [0] z:(OR x y)) yes no)
@@ -27781,13 +27627,11 @@ func rewriteBlockPPC64(b *Block) bool {
 			if !(z.Uses == 1) {
 				break
 			}
-			b.Kind = BlockPPC64NE
+			b.Reset(BlockPPC64NE)
|
||||||
b.ResetControls()
|
|
||||||
v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
|
v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
|
||||||
v0.AddArg(x)
|
v0.AddArg(x)
|
||||||
v0.AddArg(y)
|
v0.AddArg(y)
|
||||||
b.AddControl(v0)
|
b.AddControl(v0)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (NE (CMPconst [0] z:(XOR x y)) yes no)
|
// match: (NE (CMPconst [0] z:(XOR x y)) yes no)
|
||||||
|
|
@ -27807,13 +27651,11 @@ func rewriteBlockPPC64(b *Block) bool {
|
||||||
if !(z.Uses == 1) {
|
if !(z.Uses == 1) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
b.Kind = BlockPPC64NE
|
b.Reset(BlockPPC64NE)
|
||||||
b.ResetControls()
|
|
||||||
v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
|
v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
|
||||||
v0.AddArg(x)
|
v0.AddArg(x)
|
||||||
v0.AddArg(y)
|
v0.AddArg(y)
|
||||||
b.AddControl(v0)
|
b.AddControl(v0)
|
||||||
b.Aux = nil
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
@@ -47414,10 +47414,8 @@ func rewriteBlockgeneric(b *Block) bool {
 	for b.Controls[0].Op == OpNot {
 		v_0 := b.Controls[0]
 		cond := v_0.Args[0]
-		b.Kind = BlockIf
-		b.ResetControls()
+		b.Reset(BlockIf)
 		b.AddControl(cond)
-		b.Aux = nil
 		b.swapSuccessors()
 		return true
 	}
@@ -47430,9 +47428,7 @@ func rewriteBlockgeneric(b *Block) bool {
 		if !(c == 1) {
 			break
 		}
-		b.Kind = BlockFirst
-		b.ResetControls()
-		b.Aux = nil
+		b.Reset(BlockFirst)
 		return true
 	}
 	// match: (If (ConstBool [c]) yes no)
@@ -47444,9 +47440,7 @@ func rewriteBlockgeneric(b *Block) bool {
 		if !(c == 0) {
 			break
 		}
-		b.Kind = BlockFirst
-		b.ResetControls()
-		b.Aux = nil
+		b.Reset(BlockFirst)
 		b.swapSuccessors()
 		return true
 	}
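Throughout these generated rewrite files the three-step pattern "b.Kind = ...; b.ResetControls(); b.Aux = nil" collapses into a single b.Reset(kind) call. The toy sketch below only illustrates what that helper bundles together; the type and method names mirror the diff, but the real definitions live in cmd/compile/internal/ssa and may differ in detail.

package main

import "fmt"

// Toy stand-ins for the ssa package's Block machinery.
type BlockKind int

type Value struct{ ID int }

type Block struct {
	Kind     BlockKind
	Controls []*Value
	Aux      interface{}
}

// Reset replaces the old three-line retargeting sequence in one call:
// set the new kind, drop any control values, and clear the aux field.
func (b *Block) Reset(kind BlockKind) {
	b.Kind = kind
	b.Controls = b.Controls[:0]
	b.Aux = nil
}

func main() {
	b := &Block{Kind: 1, Controls: []*Value{{ID: 7}}, Aux: "cc"}
	b.Reset(2)
	fmt.Println(b.Kind, len(b.Controls), b.Aux) // 2 0 <nil>
}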
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !nacl
-
 package ssa
 
 import (
@@ -23,7 +21,7 @@ func TestSizeof(t *testing.T) {
 		_64bit uintptr // size on 64bit platforms
 	}{
 		{Value{}, 72, 112},
-		{Block{}, 156, 296},
+		{Block{}, 164, 304},
 		{LocalSlot{}, 32, 48},
 		{valState{}, 28, 40},
 	}
@@ -8,15 +8,19 @@ import (
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/src"
+	"fmt"
 	"strings"
 )
 
-// A ZeroRegion records a range of an object which is known to be zero.
+// A ZeroRegion records parts of an object which are known to be zero.
 // A ZeroRegion only applies to a single memory state.
+// Each bit in mask is set if the corresponding pointer-sized word of
+// the base object is known to be zero.
+// In other words, if mask & (1<<i) != 0, then [base+i*ptrSize, base+(i+1)*ptrSize)
+// is known to be zero.
 type ZeroRegion struct {
 	base *Value
-	min  int64
-	max  int64
+	mask uint64
 }
 
 // needwb reports whether we need write barrier for store op v.
@@ -46,10 +50,25 @@ func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
 		off += ptr.AuxInt
 		ptr = ptr.Args[0]
 	}
-	z := zeroes[v.MemoryArg().ID]
-	if ptr == z.base && off >= z.min && off+size <= z.max {
-		return false
+	ptrSize := v.Block.Func.Config.PtrSize
+	if off%ptrSize != 0 || size%ptrSize != 0 {
+		v.Fatalf("unaligned pointer write")
 	}
+	if off < 0 || off+size > 64*ptrSize {
+		// write goes off end of tracked offsets
+		return true
+	}
+	z := zeroes[v.MemoryArg().ID]
+	if ptr != z.base {
+		return true
+	}
+	for i := off; i < off+size; i += ptrSize {
+		if z.mask>>uint(i/ptrSize)&1 == 0 {
+			return true // not known to be zero
+		}
+	}
+	// All written locations are known to be zero - write barrier not needed.
+	return false
 	}
 	return true
 }
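The new needwb logic replaces the old [min, max) interval test with a per-word bitmask over the first 64 pointer-sized words of the object. A small self-contained sketch of that membership test, using made-up values for ptrSize and the mask rather than the compiler's real data structures:

package main

import "fmt"

// knownZero reports whether every pointer-sized word in [off, off+size)
// is marked as zero in mask, mirroring the loop added to needwb.
// Bit i of mask covers [base+i*ptrSize, base+(i+1)*ptrSize).
func knownZero(mask uint64, off, size, ptrSize int64) bool {
	if off < 0 || off+size > 64*ptrSize {
		return false // outside the 64 words being tracked
	}
	for i := off; i < off+size; i += ptrSize {
		if mask>>uint(i/ptrSize)&1 == 0 {
			return false
		}
	}
	return true
}

func main() {
	const ptrSize = 8
	mask := uint64(0b1111) // first four words known to be zero
	fmt.Println(knownZero(mask, 0, 16, ptrSize))  // true: words 0 and 1
	fmt.Println(knownZero(mask, 24, 16, ptrSize)) // false: word 4 is unknown
}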
@@ -375,10 +394,11 @@ func writebarrier(f *Func) {
 // computeZeroMap returns a map from an ID of a memory value to
 // a set of locations that are known to be zeroed at that memory value.
 func (f *Func) computeZeroMap() map[ID]ZeroRegion {
+	ptrSize := f.Config.PtrSize
 	// Keep track of which parts of memory are known to be zero.
 	// This helps with removing write barriers for various initialization patterns.
 	// This analysis is conservative. We only keep track, for each memory state, of
-	// a single constant range of a single object which is known to be zero.
+	// which of the first 64 words of a single object are known to be zero.
 	zeroes := map[ID]ZeroRegion{}
 	// Find new objects.
 	for _, b := range f.Blocks {
@@ -388,7 +408,11 @@ func (f *Func) computeZeroMap() map[ID]ZeroRegion {
 			}
 			mem := v.MemoryArg()
 			if IsNewObject(v, mem) {
-				zeroes[mem.ID] = ZeroRegion{v, 0, v.Type.Elem().Size()}
+				nptr := v.Type.Elem().Size() / ptrSize
+				if nptr > 64 {
+					nptr = 64
+				}
+				zeroes[mem.ID] = ZeroRegion{base: v, mask: 1<<uint(nptr) - 1}
 			}
 		}
 	}
@@ -420,26 +444,36 @@ func (f *Func) computeZeroMap() map[ID]ZeroRegion {
 				// So we have to throw all the zero information we have away.
 				continue
 			}
-			if off < z.min || off+size > z.max {
-				// Writing, at least partially, outside the known zeroes.
-				// We could salvage some zero information, but probably
-				// not worth it.
+			// Round to cover any partially written pointer slots.
+			// Pointer writes should never be unaligned like this, but non-pointer
+			// writes to pointer-containing types will do this.
+			if d := off % ptrSize; d != 0 {
+				off -= d
+				size += d
+			}
+			if d := size % ptrSize; d != 0 {
+				size += ptrSize - d
+			}
+			// Clip to the 64 words that we track.
+			min := off
+			max := off + size
+			if min < 0 {
+				min = 0
+			}
+			if max > 64*ptrSize {
+				max = 64 * ptrSize
+			}
+			// Clear bits for parts that we are writing (and hence
+			// will no longer necessarily be zero).
+			for i := min; i < max; i += ptrSize {
+				bit := i / ptrSize
				z.mask &^= 1 << uint(bit)
+			}
+			if z.mask == 0 {
+				// No more known zeros - don't bother keeping.
 				continue
 			}
-			// We now know we're storing to a zeroed area.
-			// We need to make a smaller zero range for the result of this store.
-			if off == z.min {
-				z.min += size
-			} else if off+size == z.max {
-				z.max -= size
-			} else {
-				// The store splits the known zero range in two.
-				// Keep track of the upper one, as we tend to initialize
-				// things in increasing memory order.
-				// TODO: keep track of larger one instead?
-				z.min = off + size
-			}
-			// Save updated zero range.
+			// Save updated known zero contents for new store.
 			if zeroes[v.ID] != z {
 				zeroes[v.ID] = z
 				changed = true
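The store-update path above rounds the written range out to pointer-word boundaries, clips it to the 64 tracked words, and clears the corresponding mask bits. A standalone sketch of that arithmetic with invented inputs (the real code operates on *Value offsets inside the compiler):

package main

import "fmt"

// clearWritten clears the mask bits for every word touched by a write of
// size bytes at offset off, mirroring the update loop in computeZeroMap.
func clearWritten(mask uint64, off, size, ptrSize int64) uint64 {
	if d := off % ptrSize; d != 0 { // round the start down to a word boundary
		off -= d
		size += d
	}
	if d := size % ptrSize; d != 0 { // round the length up to whole words
		size += ptrSize - d
	}
	min, max := off, off+size
	if min < 0 {
		min = 0
	}
	if max > 64*ptrSize {
		max = 64 * ptrSize
	}
	for i := min; i < max; i += ptrSize {
		mask &^= 1 << uint(i/ptrSize)
	}
	return mask
}

func main() {
	const ptrSize = 8
	mask := uint64(0xFF) // words 0..7 known to be zero
	// An unaligned 8-byte write at offset 12 touches words 1 and 2.
	fmt.Printf("%#x\n", clearWritten(mask, 12, 8, ptrSize)) // 0xf9
}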
@@ -450,6 +484,12 @@ func (f *Func) computeZeroMap() map[ID]ZeroRegion {
 			break
 		}
 	}
+	if f.pass.debug > 0 {
+		fmt.Printf("func %s\n", f.Name)
+		for mem, z := range zeroes {
+			fmt.Printf("  memory=v%d ptr=%v zeromask=%b\n", mem, z.base, z.mask)
+		}
+	}
 	return zeroes
 }
 
@@ -512,20 +552,23 @@ func IsGlobalAddr(v *Value) bool {
 	if v.Op == OpConstNil {
 		return true
 	}
+	if v.Op == OpLoad && IsReadOnlyGlobalAddr(v.Args[0]) {
+		return true // loading from a read-only global - the resulting address can't be a heap address.
+	}
 	return false
 }
 
 // IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global.
 func IsReadOnlyGlobalAddr(v *Value) bool {
-	if !IsGlobalAddr(v) {
-		return false
-	}
 	if v.Op == OpConstNil {
 		// Nil pointers are read only. See issue 33438.
 		return true
 	}
 	// See TODO in OpAddr case in IsSanitizerSafeAddr below.
-	return strings.HasPrefix(v.Aux.(*obj.LSym).Name, `""..stmp_`)
+	if v.Op == OpAddr && strings.HasPrefix(v.Aux.(*obj.LSym).Name, `""..stmp_`) {
+		return true
+	}
+	return false
 }
 
 // IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object at memory state mem.
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !nacl
-
 package types
 
 import (
@@ -23,7 +23,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog
 		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
 			p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
 		}
-	} else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
+	} else if cnt <= int64(128*gc.Widthreg) {
 		p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
 		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
 		p.To.Sym = gc.Duffzero
@@ -24,7 +24,6 @@ import (
 var archInits = map[string]func(*gc.Arch){
 	"386":      x86.Init,
 	"amd64":    amd64.Init,
-	"amd64p32": amd64.Init,
 	"arm":      arm.Init,
 	"arm64":    arm64.Init,
 	"mips":     mips.Init,
src/cmd/dist/build.go (vendored)
@@ -61,7 +61,6 @@ var (
 var okgoarch = []string{
 	"386",
 	"amd64",
-	"amd64p32",
 	"arm",
 	"arm64",
 	"mips",
@@ -86,7 +85,7 @@ var okgoos = []string{
 	"android",
 	"solaris",
 	"freebsd",
-	"nacl",
+	"nacl", // keep;
 	"netbsd",
 	"openbsd",
 	"plan9",
@@ -1505,9 +1504,6 @@ var cgoEnabled = map[string]bool{
 	"android/arm":     true,
 	"android/arm64":   true,
 	"js/wasm":         false,
-	"nacl/386":        false,
-	"nacl/amd64p32":   false,
-	"nacl/arm":        false,
 	"netbsd/386":      true,
 	"netbsd/amd64":    true,
 	"netbsd/arm":      true,
src/cmd/dist/test.go (vendored)
@@ -703,7 +703,7 @@ func (t *tester) registerTests() {
 
 	// Doc tests only run on builders.
 	// They find problems approximately never.
-	if t.hasBash() && goos != "nacl" && goos != "js" && goos != "android" && !t.iOS() && os.Getenv("GO_BUILDER_NAME") != "" {
+	if t.hasBash() && goos != "js" && goos != "android" && !t.iOS() && os.Getenv("GO_BUILDER_NAME") != "" {
 		t.registerTest("doc_progs", "../doc/progs", "time", "go", "run", "run.go")
 		t.registerTest("wiki", "../doc/articles/wiki", "./test.bash")
 		t.registerTest("codewalk", "../doc/codewalk", "time", "./run")
@@ -735,7 +735,7 @@ func (t *tester) registerTests() {
 			})
 		}
 	}
-	if goos != "nacl" && goos != "android" && !t.iOS() && goos != "js" {
+	if goos != "android" && !t.iOS() && goos != "js" {
 		t.tests = append(t.tests, distTest{
 			name:    "api",
 			heading: "API check",
src/cmd/dist/util.go (vendored)
@@ -383,10 +383,6 @@ func xsamefile(f1, f2 string) bool {
 }
 
 func xgetgoarm() string {
-	if goos == "nacl" {
-		// NaCl guarantees VFPv3 and is always cross-compiled.
-		return "7"
-	}
 	if goos == "darwin" || goos == "android" {
 		// Assume all darwin/arm and android devices have VFPv3.
 		// These ports are also mostly cross-compiled, so it makes little
@@ -162,7 +162,15 @@ func findCodeRoots() []Dir {
 		// Check for use of modules by 'go env GOMOD',
 		// which reports a go.mod file path if modules are enabled.
 		stdout, _ := exec.Command("go", "env", "GOMOD").Output()
-		usingModules = len(bytes.TrimSpace(stdout)) > 0
+		gomod := string(bytes.TrimSpace(stdout))
+		usingModules = len(gomod) > 0
+		if gomod == os.DevNull {
+			// Modules are enabled, but the working directory is outside any module.
+			// We can still access std, cmd, and packages specified as source files
+			// on the command line, but there are no module roots.
+			// Avoid 'go list -m all' below, since it will not work.
+			return list
+		}
 	}
 
 	if !usingModules {
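For context: go env GOMOD prints the path of the active go.mod, or the OS null device (/dev/null or NUL) when module mode is on but no module is found; an empty result means modules are off. A minimal standalone sketch of the same probe, with illustrative names rather than the tool's own:

package main

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Ask the go command whether modules are in use for the current directory.
	out, _ := exec.Command("go", "env", "GOMOD").Output()
	gomod := string(bytes.TrimSpace(out))

	usingModules := len(gomod) > 0
	insideModule := usingModules && gomod != os.DevNull

	fmt.Println("modules enabled:", usingModules)
	fmt.Println("inside a module:", insideModule)
}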
@@ -33,9 +33,6 @@ func TestMain(m *testing.M) {
 }
 
 func maybeSkip(t *testing.T) {
-	if strings.HasPrefix(runtime.GOOS, "nacl") {
-		t.Skip("nacl does not have a full file tree")
-	}
 	if runtime.GOOS == "darwin" && strings.HasPrefix(runtime.GOARCH, "arm") {
 		t.Skip("darwin/arm does not have a full file tree")
 	}
@@ -231,8 +231,8 @@ func parseArgs(args []string) (pkg *build.Package, path, symbol string, more bool) {
 	// First, is it a complete package path as it is? If so, we are done.
 	// This avoids confusion over package paths that have other
 	// package paths as their prefix.
-	pkg, err = build.Import(arg, wd, build.ImportComment)
-	if err == nil {
+	pkg, importErr := build.Import(arg, wd, build.ImportComment)
+	if importErr == nil {
 		return pkg, arg, "", false
 	}
 	// Another disambiguator: If the symbol starts with an upper
@@ -286,7 +286,18 @@ func parseArgs(args []string) (pkg *build.Package, path, symbol string, more bool) {
 	}
 	// If it has a slash, we've failed.
 	if slash >= 0 {
-		log.Fatalf("no such package %s", arg[0:period])
+		// build.Import should always include the path in its error message,
+		// and we should avoid repeating it. Unfortunately, build.Import doesn't
+		// return a structured error. That can't easily be fixed, since it
+		// invokes 'go list' and returns the error text from the loaded package.
+		// TODO(golang.org/issue/34750): load using golang.org/x/tools/go/packages
+		// instead of go/build.
+		importErrStr := importErr.Error()
+		if strings.Contains(importErrStr, arg[:period]) {
+			log.Fatal(importErrStr)
+		} else {
+			log.Fatalf("no such package %s: %s", arg[:period], importErrStr)
+		}
 	}
 	// Guess it's a symbol in the current directory.
 	return importDir(wd), "", arg, false
|
@ -1,6 +1,6 @@
|
||||||
module cmd
|
module cmd
|
||||||
|
|
||||||
go 1.12
|
go 1.14
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f
|
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f
|
||||||
|
|
|
||||||
|
|
@ -435,6 +435,9 @@
|
||||||
// The -n flag prints commands that would be executed.
|
// The -n flag prints commands that would be executed.
|
||||||
// The -x flag prints commands as they are executed.
|
// The -x flag prints commands as they are executed.
|
||||||
//
|
//
|
||||||
|
// The -mod flag's value sets which module download mode
|
||||||
|
// to use: readonly or vendor. See 'go help modules' for more.
|
||||||
|
//
|
||||||
// To run gofmt with specific options, run gofmt itself.
|
// To run gofmt with specific options, run gofmt itself.
|
||||||
//
|
//
|
||||||
// See also: go fix, go vet.
|
// See also: go fix, go vet.
|
||||||
|
|
@ -1235,7 +1238,7 @@
|
||||||
// If the -exec flag is not given, GOOS or GOARCH is different from the system
|
// If the -exec flag is not given, GOOS or GOARCH is different from the system
|
||||||
// default, and a program named go_$GOOS_$GOARCH_exec can be found
|
// default, and a program named go_$GOOS_$GOARCH_exec can be found
|
||||||
// on the current search path, 'go run' invokes the binary using that program,
|
// on the current search path, 'go run' invokes the binary using that program,
|
||||||
// for example 'go_nacl_386_exec a.out arguments...'. This allows execution of
|
// for example 'go_js_wasm_exec a.out arguments...'. This allows execution of
|
||||||
// cross-compiled programs when a simulator or other execution method is
|
// cross-compiled programs when a simulator or other execution method is
|
||||||
// available.
|
// available.
|
||||||
//
|
//
|
||||||
|
|
|
||||||
|
|
@ -56,7 +56,7 @@ func tooSlow(t *testing.T) {
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
case "android", "js", "nacl":
|
case "android", "js":
|
||||||
canRun = false
|
canRun = false
|
||||||
case "darwin":
|
case "darwin":
|
||||||
switch runtime.GOARCH {
|
switch runtime.GOARCH {
|
||||||
|
|
@ -5604,7 +5604,7 @@ func TestTestCacheInputs(t *testing.T) {
|
||||||
tg.grepStdout(`\(cached\)`, "did not cache")
|
tg.grepStdout(`\(cached\)`, "did not cache")
|
||||||
|
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
case "nacl", "plan9", "windows":
|
case "plan9", "windows":
|
||||||
// no shell scripts
|
// no shell scripts
|
||||||
default:
|
default:
|
||||||
tg.run("test", "testcache", "-run=Exec")
|
tg.run("test", "testcache", "-run=Exec")
|
||||||
|
|
|
||||||
|
|
@ -2,8 +2,6 @@
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// +build !nacl
|
|
||||||
|
|
||||||
package main_test
|
package main_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
|
||||||
|
|
@@ -33,3 +33,8 @@ func AddBuildFlagsNX(flags *flag.FlagSet) {
 	flags.BoolVar(&cfg.BuildN, "n", false, "")
 	flags.BoolVar(&cfg.BuildX, "x", false, "")
 }
+
+// AddLoadFlags adds the -mod build flag to the flag set.
+func AddLoadFlags(flags *flag.FlagSet) {
+	flags.StringVar(&cfg.BuildMod, "mod", "", "")
+}
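The new AddLoadFlags helper just binds the -mod string flag to the shared build configuration so that commands like go fmt can honor it. A tiny standalone sketch of the same idea; buildMod and addLoadFlags are illustrative stand-ins for cfg.BuildMod and the internal helper:

package main

import (
	"flag"
	"fmt"
	"os"
)

var buildMod string // stand-in for cfg.BuildMod

// addLoadFlags mirrors the shape of base.AddLoadFlags: register -mod on a
// command's flag set and store its value in the shared setting.
func addLoadFlags(flags *flag.FlagSet) {
	flags.StringVar(&buildMod, "mod", "", "module download mode (readonly or vendor)")
}

func main() {
	flags := flag.NewFlagSet("fmt", flag.ExitOnError)
	addLoadFlags(flags)
	_ = flags.Parse(os.Args[1:]) // e.g. -mod=vendor
	fmt.Println("mod:", buildMod)
}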
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build aix darwin dragonfly freebsd js linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd js linux netbsd openbsd solaris
 
 package base
 
|
@ -22,6 +22,7 @@ import (
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
base.AddBuildFlagsNX(&CmdFmt.Flag)
|
base.AddBuildFlagsNX(&CmdFmt.Flag)
|
||||||
|
base.AddLoadFlags(&CmdFmt.Flag)
|
||||||
}
|
}
|
||||||
|
|
||||||
var CmdFmt = &base.Command{
|
var CmdFmt = &base.Command{
|
||||||
|
|
@ -38,6 +39,9 @@ For more about specifying packages, see 'go help packages'.
|
||||||
The -n flag prints commands that would be executed.
|
The -n flag prints commands that would be executed.
|
||||||
The -x flag prints commands as they are executed.
|
The -x flag prints commands as they are executed.
|
||||||
|
|
||||||
|
The -mod flag's value sets which module download mode
|
||||||
|
to use: readonly or vendor. See 'go help modules' for more.
|
||||||
|
|
||||||
To run gofmt with specific options, run gofmt itself.
|
To run gofmt with specific options, run gofmt itself.
|
||||||
|
|
||||||
See also: go fix, go vet.
|
See also: go fix, go vet.
|
||||||
|
|
@ -68,7 +72,7 @@ func runFmt(cmd *base.Command, args []string) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if pkg.Error != nil {
|
if pkg.Error != nil {
|
||||||
if strings.HasPrefix(pkg.Error.Err, "build constraints exclude all Go files") {
|
if strings.HasPrefix(pkg.Error.Err.Error(), "build constraints exclude all Go files") {
|
||||||
// Skip this error, as we will format
|
// Skip this error, as we will format
|
||||||
// all files regardless.
|
// all files regardless.
|
||||||
} else {
|
} else {
|
||||||
|
|
|
||||||
|
|
@ -274,7 +274,7 @@ func download(arg string, parent *load.Package, stk *load.ImportStack, mode int)
|
||||||
stk.Push(arg)
|
stk.Push(arg)
|
||||||
err := downloadPackage(p)
|
err := downloadPackage(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
base.Errorf("%s", &load.PackageError{ImportStack: stk.Copy(), Err: err.Error()})
|
base.Errorf("%s", &load.PackageError{ImportStack: stk.Copy(), Err: err})
|
||||||
stk.Pop()
|
stk.Pop()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -355,7 +355,7 @@ func download(arg string, parent *load.Package, stk *load.ImportStack, mode int)
|
||||||
stk.Push(path)
|
stk.Push(path)
|
||||||
err := &load.PackageError{
|
err := &load.PackageError{
|
||||||
ImportStack: stk.Copy(),
|
ImportStack: stk.Copy(),
|
||||||
Err: "must be imported as " + path[j+len("vendor/"):],
|
Err: load.ImportErrorf(path, "%s must be imported as %s", path, path[j+len("vendor/"):]),
|
||||||
}
|
}
|
||||||
stk.Pop()
|
stk.Pop()
|
||||||
base.Errorf("%s", err)
|
base.Errorf("%s", err)
|
||||||
|
|
|
||||||
|
|
@ -210,7 +210,7 @@ var KnownOS = map[string]bool{
|
||||||
"illumos": true,
|
"illumos": true,
|
||||||
"js": true,
|
"js": true,
|
||||||
"linux": true,
|
"linux": true,
|
||||||
"nacl": true,
|
"nacl": true, // legacy; don't remove
|
||||||
"netbsd": true,
|
"netbsd": true,
|
||||||
"openbsd": true,
|
"openbsd": true,
|
||||||
"plan9": true,
|
"plan9": true,
|
||||||
|
|
@ -222,7 +222,7 @@ var KnownOS = map[string]bool{
|
||||||
var KnownArch = map[string]bool{
|
var KnownArch = map[string]bool{
|
||||||
"386": true,
|
"386": true,
|
||||||
"amd64": true,
|
"amd64": true,
|
||||||
"amd64p32": true,
|
"amd64p32": true, // legacy; don't remove
|
||||||
"arm": true,
|
"arm": true,
|
||||||
"armbe": true,
|
"armbe": true,
|
||||||
"arm64": true,
|
"arm64": true,
|
||||||
|
|
|
||||||
|
|
@ -384,6 +384,9 @@ func runList(cmd *base.Command, args []string) {
|
||||||
if modload.Init(); !modload.Enabled() {
|
if modload.Init(); !modload.Enabled() {
|
||||||
base.Fatalf("go list -m: not using modules")
|
base.Fatalf("go list -m: not using modules")
|
||||||
}
|
}
|
||||||
|
if cfg.BuildMod == "vendor" {
|
||||||
|
base.Fatalf("go list -m: can't list modules with -mod=vendor\n\tuse -mod=mod or -mod=readonly to ignore the vendor directory")
|
||||||
|
}
|
||||||
modload.LoadBuildList()
|
modload.LoadBuildList()
|
||||||
|
|
||||||
mods := modload.ListModules(args, *listU, *listVersions)
|
mods := modload.ListModules(args, *listU, *listVersions)
|
||||||
|
|
|
||||||
|
|
@ -7,6 +7,7 @@ package load
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go/build"
|
"go/build"
|
||||||
|
|
@ -304,9 +305,9 @@ func (p *Package) copyBuild(pp *build.Package) {
|
||||||
type PackageError struct {
|
type PackageError struct {
|
||||||
ImportStack []string // shortest path from package named on command line to this one
|
ImportStack []string // shortest path from package named on command line to this one
|
||||||
Pos string // position of error
|
Pos string // position of error
|
||||||
Err string // the error itself
|
Err error // the error itself
|
||||||
IsImportCycle bool `json:"-"` // the error is an import cycle
|
IsImportCycle bool // the error is an import cycle
|
||||||
Hard bool `json:"-"` // whether the error is soft or hard; soft errors are ignored in some places
|
Hard bool // whether the error is soft or hard; soft errors are ignored in some places
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PackageError) Error() string {
|
func (p *PackageError) Error() string {
|
||||||
|
|
@@ -317,12 +318,77 @@ func (p *PackageError) Error() string {
 	if p.Pos != "" {
 		// Omit import stack. The full path to the file where the error
 		// is the most important thing.
-		return p.Pos + ": " + p.Err
+		return p.Pos + ": " + p.Err.Error()
 	}
-	if len(p.ImportStack) == 0 {
-		return p.Err
+
+	// If the error is an ImportPathError, and the last path on the stack appears
+	// in the error message, omit that path from the stack to avoid repetition.
+	// If an ImportPathError wraps another ImportPathError that matches the
+	// last path on the stack, we don't omit the path. An error like
+	// "package A imports B: error loading C caused by B" would not be clearer
+	// if "imports B" were omitted.
+	stack := p.ImportStack
+	var ierr ImportPathError
+	if len(stack) > 0 && errors.As(p.Err, &ierr) && ierr.ImportPath() == stack[len(stack)-1] {
+		stack = stack[:len(stack)-1]
 	}
-	return "package " + strings.Join(p.ImportStack, "\n\timports ") + ": " + p.Err
+	if len(stack) == 0 {
+		return p.Err.Error()
+	}
+	return "package " + strings.Join(stack, "\n\timports ") + ": " + p.Err.Error()
+}
+
+// PackageError implements MarshalJSON so that Err is marshaled as a string
+// and non-essential fields are omitted.
+func (p *PackageError) MarshalJSON() ([]byte, error) {
+	perr := struct {
+		ImportStack []string
+		Pos         string
+		Err         string
+	}{p.ImportStack, p.Pos, p.Err.Error()}
+	return json.Marshal(perr)
+}
+
+// ImportPathError is a type of error that prevents a package from being loaded
+// for a given import path. When such a package is loaded, a *Package is
+// returned with Err wrapping an ImportPathError: the error is attached to
+// the imported package, not the importing package.
+//
+// The string returned by ImportPath must appear in the string returned by
+// Error. Errors that wrap ImportPathError (such as PackageError) may omit
+// the import path.
+type ImportPathError interface {
+	error
+	ImportPath() string
+}
+
+type importError struct {
+	importPath string
+	err        error // created with fmt.Errorf
+}
+
+var _ ImportPathError = (*importError)(nil)
+
+func ImportErrorf(path, format string, args ...interface{}) ImportPathError {
+	err := &importError{importPath: path, err: fmt.Errorf(format, args...)}
+	if errStr := err.Error(); !strings.Contains(errStr, path) {
+		panic(fmt.Sprintf("path %q not in error %q", path, errStr))
+	}
+	return err
+}
+
+func (e *importError) Error() string {
+	return e.err.Error()
+}
+
+func (e *importError) Unwrap() error {
+	// Don't return e.err directly, since we're only wrapping an error if %w
+	// was passed to ImportErrorf.
+	return errors.Unwrap(e.err)
+}
+
+func (e *importError) ImportPath() string {
+	return e.importPath
+}
 
 // An ImportStack is a stack of import paths, possibly with the suffix " (test)" appended.
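The ImportPathError contract lets a wrapping PackageError drop a redundant entry from the import stack when the underlying error already names that path. A small self-contained illustration of that errors.As probe, with toy types standing in for the cmd/go internals:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// importPathError mirrors the interface introduced in the diff.
type importPathError interface {
	error
	ImportPath() string
}

// pathError is a toy ImportPathError: its message must contain the path.
type pathError struct {
	path string
	msg  string
}

func (e *pathError) Error() string      { return e.msg }
func (e *pathError) ImportPath() string { return e.path }

// describe mirrors PackageError.Error: drop the last stack entry when the
// wrapped error already names that import path.
func describe(stack []string, err error) string {
	var ierr importPathError
	if len(stack) > 0 && errors.As(err, &ierr) && ierr.ImportPath() == stack[len(stack)-1] {
		stack = stack[:len(stack)-1]
	}
	if len(stack) == 0 {
		return err.Error()
	}
	return "package " + strings.Join(stack, "\n\timports ") + ": " + err.Error()
}

func main() {
	err := &pathError{
		path: "example.com/c",
		msg:  `import "example.com/c" is a program, not an importable package`,
	}
	fmt.Println(describe([]string{"example.com/a", "example.com/b", "example.com/c"}, err))
}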
@@ -489,7 +555,7 @@ func loadImport(pre *preload, path, srcDir string, parent *Package, stk *ImportStack, ...) {
 				ImportPath: path,
 				Error: &PackageError{
 					ImportStack: stk.Copy(),
-					Err:         err.Error(),
+					Err:         err,
 				},
 			},
 		}
@@ -516,7 +582,7 @@ func loadImport(pre *preload, path, srcDir string, parent *Package, stk *ImportStack, ...) {
 	if !cfg.ModulesEnabled && path != cleanImport(path) {
 		p.Error = &PackageError{
 			ImportStack: stk.Copy(),
-			Err:         fmt.Sprintf("non-canonical import path: %q should be %q", path, pathpkg.Clean(path)),
+			Err:         fmt.Errorf("non-canonical import path: %q should be %q", path, pathpkg.Clean(path)),
 		}
 		p.Incomplete = true
 	}
@@ -536,20 +602,22 @@ func loadImport(pre *preload, path, srcDir string, parent *Package, stk *ImportStack, ...) {
 		perr := *p
 		perr.Error = &PackageError{
 			ImportStack: stk.Copy(),
-			Err:         fmt.Sprintf("import %q is a program, not an importable package", path),
+			Err:         ImportErrorf(path, "import %q is a program, not an importable package", path),
 		}
 		return setErrorPos(&perr, importPos)
 	}
 
 	if p.Internal.Local && parent != nil && !parent.Internal.Local {
 		perr := *p
-		errMsg := fmt.Sprintf("local import %q in non-local package", path)
+		var err error
 		if path == "." {
-			errMsg = "cannot import current directory"
+			err = ImportErrorf(path, "%s: cannot import current directory", path)
+		} else {
+			err = ImportErrorf(path, "local import %q in non-local package", path)
 		}
 		perr.Error = &PackageError{
 			ImportStack: stk.Copy(),
-			Err:         errMsg,
+			Err:         err,
 		}
 		return setErrorPos(&perr, importPos)
 	}
@@ -1125,7 +1193,7 @@ func reusePackage(p *Package, stk *ImportStack) *Package {
 	if p.Error == nil {
 		p.Error = &PackageError{
 			ImportStack:   stk.Copy(),
-			Err:           "import cycle not allowed",
+			Err:           errors.New("import cycle not allowed"),
 			IsImportCycle: true,
 		}
 	}
@@ -1228,7 +1296,7 @@ func disallowInternal(srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *Package {
 	perr := *p
 	perr.Error = &PackageError{
 		ImportStack: stk.Copy(),
-		Err:         "use of internal package " + p.ImportPath + " not allowed",
+		Err:         ImportErrorf(p.ImportPath, "use of internal package "+p.ImportPath+" not allowed"),
 	}
 	perr.Incomplete = true
 	return &perr
@@ -1275,7 +1343,7 @@ func disallowVendor(srcDir string, importer *Package, importerPath, path string, p *Package, stk *ImportStack) *Package {
 	perr := *p
 	perr.Error = &PackageError{
 		ImportStack: stk.Copy(),
-		Err:         "must be imported as " + path[i+len("vendor/"):],
+		Err:         ImportErrorf(path, "%s must be imported as %s", path, path[i+len("vendor/"):]),
 	}
 	perr.Incomplete = true
 	return &perr
@@ -1329,7 +1397,7 @@ func disallowVendorVisibility(srcDir string, p *Package, stk *ImportStack) *Package {
 	perr := *p
 	perr.Error = &PackageError{
 		ImportStack: stk.Copy(),
-		Err:         "use of vendored package not allowed",
+		Err:         errors.New("use of vendored package not allowed"),
 	}
 	perr.Incomplete = true
 	return &perr
@@ -1455,7 +1523,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
 		err = base.ExpandScanner(err)
 		p.Error = &PackageError{
 			ImportStack: stk.Copy(),
-			Err:         err.Error(),
+			Err:         err,
 		}
 		return
 	}
@@ -1472,7 +1540,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
 	// Report an error when the old code.google.com/p/go.tools paths are used.
 	if InstallTargetDir(p) == StalePath {
 		newPath := strings.Replace(p.ImportPath, "code.google.com/p/go.", "golang.org/x/", 1)
-		e := fmt.Sprintf("the %v command has moved; use %v instead.", p.ImportPath, newPath)
+		e := ImportErrorf(p.ImportPath, "the %v command has moved; use %v instead.", p.ImportPath, newPath)
 		p.Error = &PackageError{Err: e}
 		return
 	}
@@ -1585,7 +1653,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
 	if f1 != "" {
 		p.Error = &PackageError{
 			ImportStack: stk.Copy(),
-			Err:         fmt.Sprintf("case-insensitive file name collision: %q and %q", f1, f2),
+			Err:         fmt.Errorf("case-insensitive file name collision: %q and %q", f1, f2),
 		}
 		return
 	}
@@ -1601,7 +1669,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
 		if !SafeArg(file) || strings.HasPrefix(file, "_cgo_") {
 			p.Error = &PackageError{
 				ImportStack: stk.Copy(),
-				Err:         fmt.Sprintf("invalid input file name %q", file),
+				Err:         fmt.Errorf("invalid input file name %q", file),
 			}
 			return
 		}
@@ -1609,14 +1677,14 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
 	if name := pathpkg.Base(p.ImportPath); !SafeArg(name) {
 		p.Error = &PackageError{
 			ImportStack: stk.Copy(),
-			Err:         fmt.Sprintf("invalid input directory name %q", name),
+			Err:         fmt.Errorf("invalid input directory name %q", name),
 		}
 		return
 	}
 	if !SafeArg(p.ImportPath) {
 		p.Error = &PackageError{
 			ImportStack: stk.Copy(),
-			Err:         fmt.Sprintf("invalid import path %q", p.ImportPath),
+			Err:         ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath),
 		}
 		return
 	}
@@ -1662,31 +1730,31 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
 		// code; see issue #16050).
 	}
 
-	setError := func(msg string) {
+	setError := func(err error) {
 		p.Error = &PackageError{
 			ImportStack: stk.Copy(),
-			Err:         msg,
+			Err:         err,
 		}
 	}
 
 	// The gc toolchain only permits C source files with cgo or SWIG.
 	if len(p.CFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() && cfg.BuildContext.Compiler == "gc" {
-		setError(fmt.Sprintf("C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CFiles, " ")))
+		setError(fmt.Errorf("C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CFiles, " ")))
 		return
 	}
 
 	// C++, Objective-C, and Fortran source files are permitted only with cgo or SWIG,
 	// regardless of toolchain.
 	if len(p.CXXFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() {
-		setError(fmt.Sprintf("C++ source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CXXFiles, " ")))
+		setError(fmt.Errorf("C++ source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CXXFiles, " ")))
 		return
 	}
 	if len(p.MFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() {
-		setError(fmt.Sprintf("Objective-C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.MFiles, " ")))
+		setError(fmt.Errorf("Objective-C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.MFiles, " ")))
 		return
 	}
 	if len(p.FFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() {
-		setError(fmt.Sprintf("Fortran source files not allowed when not using cgo or SWIG: %s", strings.Join(p.FFiles, " ")))
+		setError(fmt.Errorf("Fortran source files not allowed when not using cgo or SWIG: %s", strings.Join(p.FFiles, " ")))
 		return
 	}
 
@@ -1695,7 +1763,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
 	if other := foldPath[fold]; other == "" {
 		foldPath[fold] = p.ImportPath
 	} else if other != p.ImportPath {
-		setError(fmt.Sprintf("case-insensitive import collision: %q and %q", p.ImportPath, other))
+		setError(ImportErrorf(p.ImportPath, "case-insensitive import collision: %q and %q", p.ImportPath, other))
 		return
 	}
@ -110,7 +110,7 @@ func TestPackagesAndErrors(p *Package, cover *TestCover) (pmain, ptest, pxtest *
|
||||||
// non-test copy of a package.
|
// non-test copy of a package.
|
||||||
ptestErr = &PackageError{
|
ptestErr = &PackageError{
|
||||||
ImportStack: testImportStack(stk[0], p1, p.ImportPath),
|
ImportStack: testImportStack(stk[0], p1, p.ImportPath),
|
||||||
Err: "import cycle not allowed in test",
|
Err: errors.New("import cycle not allowed in test"),
|
||||||
IsImportCycle: true,
|
IsImportCycle: true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@@ -110,7 +110,7 @@ func TestPackagesAndErrors(p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) {
 			// non-test copy of a package.
 			ptestErr = &PackageError{
 				ImportStack:   testImportStack(stk[0], p1, p.ImportPath),
-				Err:           "import cycle not allowed in test",
+				Err:           errors.New("import cycle not allowed in test"),
 				IsImportCycle: true,
 			}
 		}
@@ -271,7 +271,7 @@ func TestPackagesAndErrors(p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) {
 	// afterward that gathers t.Cover information.
 	t, err := loadTestFuncs(ptest)
 	if err != nil && pmain.Error == nil {
-		pmain.Error = &PackageError{Err: err.Error()}
+		pmain.Error = &PackageError{Err: err}
 	}
 	t.Cover = cover
 	if len(ptest.GoFiles)+len(ptest.CgoFiles) > 0 {
@@ -322,7 +322,7 @@ func TestPackagesAndErrors(p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) {
 
 	data, err := formatTestmain(t)
 	if err != nil && pmain.Error == nil {
-		pmain.Error = &PackageError{Err: err.Error()}
+		pmain.Error = &PackageError{Err: err}
 	}
 	if data != nil {
 		pmain.Internal.TestmainGo = &data
|
@ -2,7 +2,7 @@
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// +build !js,!nacl,!plan9
|
// +build !js,!plan9
|
||||||
|
|
||||||
package filelock_test
|
package filelock_test
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -2,8 +2,8 @@
|
||||||
// Use of this source code is governed by a BSD-style
|
// Use of this source code is governed by a BSD-style
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
// js and nacl do not support inter-process file locking.
|
// js does not support inter-process file locking.
|
||||||
// +build !js,!nacl
|
// +build !js
|
||||||
|
|
||||||
package lockedfile_test
|
package lockedfile_test
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -59,19 +59,24 @@ func runVendor(cmd *base.Command, args []string) {
|
||||||
modpkgs[m] = append(modpkgs[m], pkg)
|
modpkgs[m] = append(modpkgs[m], pkg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
isExplicit := map[module.Version]bool{}
|
||||||
|
for _, r := range modload.ModFile().Require {
|
||||||
|
isExplicit[r.Mod] = true
|
||||||
|
}
|
||||||
|
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
for _, m := range modload.BuildList()[1:] {
|
for _, m := range modload.BuildList()[1:] {
|
||||||
if pkgs := modpkgs[m]; len(pkgs) > 0 {
|
if pkgs := modpkgs[m]; len(pkgs) > 0 || isExplicit[m] {
|
||||||
repl := ""
|
line := moduleLine(m, modload.Replacement(m))
|
||||||
if r := modload.Replacement(m); r.Path != "" {
|
buf.WriteString(line)
|
||||||
repl = " => " + r.Path
|
|
||||||
if r.Version != "" {
|
|
||||||
repl += " " + r.Version
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "# %s %s%s\n", m.Path, m.Version, repl)
|
|
||||||
if cfg.BuildV {
|
if cfg.BuildV {
|
||||||
fmt.Fprintf(os.Stderr, "# %s %s%s\n", m.Path, m.Version, repl)
|
os.Stderr.WriteString(line)
|
||||||
|
}
|
||||||
|
if isExplicit[m] {
|
||||||
|
buf.WriteString("## explicit\n")
|
||||||
|
if cfg.BuildV {
|
||||||
|
os.Stderr.WriteString("## explicit\n")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
sort.Strings(pkgs)
|
sort.Strings(pkgs)
|
||||||
for _, pkg := range pkgs {
|
for _, pkg := range pkgs {
|
||||||
|
|
@ -83,6 +88,24 @@ func runVendor(cmd *base.Command, args []string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Record unused and wildcard replacements at the end of the modules.txt file:
|
||||||
|
// without access to the complete build list, the consumer of the vendor
|
||||||
|
// directory can't otherwise determine that those replacements had no effect.
|
||||||
|
for _, r := range modload.ModFile().Replace {
|
||||||
|
if len(modpkgs[r.Old]) > 0 {
|
||||||
|
// We we already recorded this replacement in the entry for the replaced
|
||||||
|
// module with the packages it provides.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
line := moduleLine(r.Old, r.New)
|
||||||
|
buf.WriteString(line)
|
||||||
|
if cfg.BuildV {
|
||||||
|
os.Stderr.WriteString(line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if buf.Len() == 0 {
|
if buf.Len() == 0 {
|
||||||
fmt.Fprintf(os.Stderr, "go: no dependencies to vendor\n")
|
fmt.Fprintf(os.Stderr, "go: no dependencies to vendor\n")
|
||||||
return
|
return
|
||||||
|
|
@ -92,6 +115,26 @@ func runVendor(cmd *base.Command, args []string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func moduleLine(m, r module.Version) string {
|
||||||
|
b := new(strings.Builder)
|
||||||
|
b.WriteString("# ")
|
||||||
|
b.WriteString(m.Path)
|
||||||
|
if m.Version != "" {
|
||||||
|
b.WriteString(" ")
|
||||||
|
b.WriteString(m.Version)
|
||||||
|
}
|
||||||
|
if r.Path != "" {
|
||||||
|
b.WriteString(" => ")
|
||||||
|
b.WriteString(r.Path)
|
||||||
|
if r.Version != "" {
|
||||||
|
b.WriteString(" ")
|
||||||
|
b.WriteString(r.Version)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.WriteString("\n")
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
func vendorPkg(vdir, pkg string) {
|
func vendorPkg(vdir, pkg string) {
|
||||||
realPath := modload.ImportMap(pkg)
|
realPath := modload.ImportMap(pkg)
|
||||||
if realPath != pkg && modload.ImportMap(realPath) != "" {
|
if realPath != pkg && modload.ImportMap(realPath) != "" {
|
||||||
|
|
|
||||||
|
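moduleLine produces the "# module version [=> replacement [version]]" stanzas of vendor/modules.txt, and the new "## explicit" marker follows modules that the main module requires directly. A tiny standalone sketch of the same formatting, using a local struct in place of the internal module.Version type:

package main

import (
	"fmt"
	"strings"
)

type version struct{ Path, Version string }

// moduleLine mirrors the helper added to 'go mod vendor'.
func moduleLine(m, r version) string {
	var b strings.Builder
	b.WriteString("# " + m.Path)
	if m.Version != "" {
		b.WriteString(" " + m.Version)
	}
	if r.Path != "" {
		b.WriteString(" => " + r.Path)
		if r.Version != "" {
			b.WriteString(" " + r.Version)
		}
	}
	b.WriteString("\n")
	return b.String()
}

func main() {
	fmt.Print(moduleLine(version{"example.com/m", "v1.2.3"}, version{}))
	fmt.Print(moduleLine(version{"example.com/m", "v1.2.3"}, version{"../local", ""}))
	fmt.Print("## explicit\n") // written after the line for directly required modules
}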
|
@ -566,6 +566,9 @@ func (f *File) SetRequire(req []*Require) {
|
||||||
var newLines []*Line
|
var newLines []*Line
|
||||||
for _, line := range stmt.Line {
|
for _, line := range stmt.Line {
|
||||||
if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" {
|
if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" {
|
||||||
|
if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 {
|
||||||
|
line.Comments.Before = line.Comments.Before[:0]
|
||||||
|
}
|
||||||
line.Token[1] = need[p]
|
line.Token[1] = need[p]
|
||||||
delete(need, p)
|
delete(need, p)
|
||||||
setIndirect(line, indirect[p])
|
setIndirect(line, indirect[p])
|
||||||
|
|
|
||||||
|
|
@@ -8,6 +8,8 @@ import (
 	"bytes"
 	"fmt"
 	"testing"
+
+	"cmd/go/internal/module"
 )
 
 var addRequireTests = []struct {
@@ -59,6 +61,40 @@ var addRequireTests = []struct {
 	},
 }
 
+var setRequireTests = []struct {
+	in   string
+	mods []struct {
+		path string
+		vers string
+	}
+	out string
+}{
+	{
+		`module m
+require (
+	x.y/b v1.2.3
+
+	x.y/a v1.2.3
+)
+`,
+		[]struct {
+			path string
+			vers string
+		}{
+			{"x.y/a", "v1.2.3"},
+			{"x.y/b", "v1.2.3"},
+			{"x.y/c", "v1.2.3"},
+		},
+		`module m
+require (
+	x.y/a v1.2.3
+	x.y/b v1.2.3
+	x.y/c v1.2.3
+)
+`,
+	},
+}
+
 func TestAddRequire(t *testing.T) {
 	for i, tt := range addRequireTests {
 		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
@@ -88,3 +124,40 @@ func TestAddRequire(t *testing.T) {
 		})
 	}
 }
+
+func TestSetRequire(t *testing.T) {
+	for i, tt := range setRequireTests {
+		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
+			f, err := Parse("in", []byte(tt.in), nil)
+			if err != nil {
+				t.Fatal(err)
+			}
+			g, err := Parse("out", []byte(tt.out), nil)
+			if err != nil {
+				t.Fatal(err)
+			}
+			golden, err := g.Format()
+			if err != nil {
+				t.Fatal(err)
+			}
+			var mods []*Require
+			for _, mod := range tt.mods {
+				mods = append(mods, &Require{
+					Mod: module.Version{
+						Path:    mod.path,
+						Version: mod.vers,
+					},
+				})
+			}
+
+			f.SetRequire(mods)
+			out, err := f.Format()
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !bytes.Equal(out, golden) {
+				t.Errorf("have:\n%s\nwant:\n%s", out, golden)
+			}
+		})
+	}
+}
|
@@ -284,6 +284,10 @@ func runGet(cmd *base.Command, args []string) {
 	// what was requested.
 	modload.DisallowWriteGoMod()
 
+	// Allow looking up modules for import paths outside of a module.
+	// 'go get' is expected to do this, unlike other commands.
+	modload.AllowMissingModuleImports()
+
 	// Parse command-line arguments and report errors. The command-line
 	// arguments are of the form path@version or simply path, with implicit
 	// @upgrade. path@none is "downgrade away".

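runGet now opts in to resolving imports without a main module by calling modload.AllowMissingModuleImports. The declaration itself is not part of this diff; judging from the allowMissingModuleImports checks in the Import hunk further down and the setup in the new TestImport code, it is presumably just a package-level toggle along these lines (a sketch, not the actual modload source):

package modload

// Sketch only: the real declarations live in cmd/go/internal/modload and are
// not shown in this diff. allowMissingModuleImports is consulted by Import,
// which otherwise reports "working directory is not part of a module".
var allowMissingModuleImports bool

// AllowMissingModuleImports lets commands such as 'go get' look up modules
// for import paths even when there is no main module.
func AllowMissingModuleImports() {
	allowMissingModuleImports = true
}
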
@@ -354,6 +358,10 @@ func runGet(cmd *base.Command, args []string) {
 			// upgrade golang.org/x/tools.
 
 		case path == "all":
+			// If there is no main module, "all" is not meaningful.
+			if !modload.HasModRoot() {
+				base.Errorf(`go get %s: cannot match "all": working directory is not part of a module`, arg)
+			}
 			// Don't query modules until we load packages. We'll automatically
 			// look up any missing modules.
 

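With that guard in place, running go get all from a directory that is not inside any module is expected to fail up front rather than silently matching nothing. The message is the one passed to base.Errorf above, roughly:

	go get all: cannot match "all": working directory is not part of a module
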
@@ -120,7 +120,8 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic {
 	}
 
 	if cfg.BuildMod == "vendor" {
-		info.Dir = filepath.Join(ModRoot(), "vendor", m.Path)
+		// The vendor directory doesn't contain enough information to reconstruct
+		// anything more about the module.
 		return info
 	}
 

@@ -16,6 +16,7 @@ import (
 	"time"
 
 	"cmd/go/internal/cfg"
+	"cmd/go/internal/load"
 	"cmd/go/internal/modfetch"
 	"cmd/go/internal/module"
 	"cmd/go/internal/par"

@@ -25,32 +26,38 @@
 )
 
 type ImportMissingError struct {
-	ImportPath string
+	Path       string
 	Module     module.Version
 	QueryErr   error
 
 	// newMissingVersion is set to a newer version of Module if one is present
 	// in the build list. When set, we can't automatically upgrade.
 	newMissingVersion string
 }
 
+var _ load.ImportPathError = (*ImportMissingError)(nil)
+
 func (e *ImportMissingError) Error() string {
 	if e.Module.Path == "" {
-		if str.HasPathPrefix(e.ImportPath, "cmd") {
-			return fmt.Sprintf("package %s is not in GOROOT (%s)", e.ImportPath, filepath.Join(cfg.GOROOT, "src", e.ImportPath))
+		if str.HasPathPrefix(e.Path, "cmd") {
+			return fmt.Sprintf("package %s is not in GOROOT (%s)", e.Path, filepath.Join(cfg.GOROOT, "src", e.Path))
 		}
 		if e.QueryErr != nil {
-			return fmt.Sprintf("cannot find module providing package %s: %v", e.ImportPath, e.QueryErr)
+			return fmt.Sprintf("cannot find module providing package %s: %v", e.Path, e.QueryErr)
 		}
-		return "cannot find module providing package " + e.ImportPath
+		return "cannot find module providing package " + e.Path
 	}
-	return fmt.Sprintf("missing module for import: %s@%s provides %s", e.Module.Path, e.Module.Version, e.ImportPath)
+	return fmt.Sprintf("missing module for import: %s@%s provides %s", e.Module.Path, e.Module.Version, e.Path)
 }
 
 func (e *ImportMissingError) Unwrap() error {
 	return e.QueryErr
 }
 
+func (e *ImportMissingError) ImportPath() string {
+	return e.Path
+}
+
 // An AmbiguousImportError indicates an import of a package found in multiple
 // modules in the build list, or found in both the main module and its vendor
 // directory.

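ImportMissingError now also satisfies load.ImportPathError by exposing the failing import path through an ImportPath method. The interface itself is not shown in this diff; presumably it pairs error with ImportPath() string, which is what the blank-identifier assertion above checks. A self-contained sketch of how a caller can recover the path from a wrapped error through such an interface (ImportPathError and missingError here are local stand-ins, not the real cmd/go types):

package main

import (
	"errors"
	"fmt"
)

// ImportPathError is a local stand-in for the interface that
// cmd/go/internal/load presumably defines: an error that knows
// which import path it is about.
type ImportPathError interface {
	error
	ImportPath() string
}

// missingError plays the role of modload.ImportMissingError in this sketch.
type missingError struct {
	path string
	err  error
}

func (e *missingError) Error() string {
	return fmt.Sprintf("cannot find module providing package %s: %v", e.path, e.err)
}

func (e *missingError) ImportPath() string { return e.path }

func (e *missingError) Unwrap() error { return e.err }

func main() {
	err := fmt.Errorf("loading packages: %w", &missingError{
		path: "example.com/missing/pkg",
		err:  errors.New("working directory is not part of a module"),
	})

	// errors.As recovers the offending import path even after the error has
	// been wrapped, which is the point of exposing an interface rather than
	// a concrete type.
	var ipe ImportPathError
	if errors.As(err, &ipe) {
		fmt.Println("failed import:", ipe.ImportPath())
		fmt.Println("reason:", ipe)
	}
}

The field rename from ImportPath to Path in the hunks that follow is what makes room for the new method, since a Go struct cannot have a field and a method with the same name.
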
@@ -121,7 +128,7 @@ func Import(path string) (m module.Version, dir string, err error) {
 		return module.Version{}, dir, nil
 	}
 	if str.HasPathPrefix(path, "cmd") {
-		return module.Version{}, "", &ImportMissingError{ImportPath: path}
+		return module.Version{}, "", &ImportMissingError{Path: path}
 	}
 
 	// -mod=vendor is special.

@@ -139,7 +146,7 @@ func Import(path string) (m module.Version, dir string, err error) {
 			return Target, mainDir, nil
 		}
 		readVendorList()
-		return vendorMap[path], vendorDir, nil
+		return vendorPkgModule[path], vendorDir, nil
 	}
 
 	// Check each module on the build list.

@@ -178,6 +185,12 @@ func Import(path string) (m module.Version, dir string, err error) {
 	if cfg.BuildMod == "readonly" {
 		return module.Version{}, "", fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod)
 	}
+	if modRoot == "" && !allowMissingModuleImports {
+		return module.Version{}, "", &ImportMissingError{
+			Path:     path,
+			QueryErr: errors.New("working directory is not part of a module"),
+		}
+	}
 
 	// Not on build list.
 	// To avoid spurious remote fetches, next try the latest replacement for each module.

@@ -220,7 +233,7 @@ func Import(path string) (m module.Version, dir string, err error) {
 			}
 			_, ok := dirInModule(path, m.Path, root, isLocal)
 			if ok {
-				return m, "", &ImportMissingError{ImportPath: path, Module: m}
+				return m, "", &ImportMissingError{Path: path, Module: m}
 			}
 		}
 	}

@@ -230,7 +243,7 @@ func Import(path string) (m module.Version, dir string, err error) {
 		if errors.Is(err, os.ErrNotExist) {
 			// Return "cannot find module providing package […]" instead of whatever
 			// low-level error QueryPackage produced.
-			return module.Version{}, "", &ImportMissingError{ImportPath: path, QueryErr: err}
+			return module.Version{}, "", &ImportMissingError{Path: path, QueryErr: err}
 		} else {
 			return module.Version{}, "", err
 		}

@@ -255,7 +268,7 @@ func Import(path string) (m module.Version, dir string, err error) {
 			}
 		}
 	}
-	return m, "", &ImportMissingError{ImportPath: path, Module: m, newMissingVersion: newMissingVersion}
+	return m, "", &ImportMissingError{Path: path, Module: m, newMissingVersion: newMissingVersion}
 }
 
 // maybeInModule reports whether, syntactically,

@@ -44,6 +44,10 @@ var importTests = []struct {
 func TestImport(t *testing.T) {
 	testenv.MustHaveExternalNetwork(t)
 	testenv.MustHaveExecPath(t, "git")
+	defer func(old bool) {
+		allowMissingModuleImports = old
+	}(allowMissingModuleImports)
+	AllowMissingModuleImports()
 
 	for _, tt := range importTests {
 		t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {

Some files were not shown because too many files have changed in this diff.