[dev.typeparams] merge master into dev.typeparams

Change-Id: I0e56b7b659ac84e14121325c560a242554196808
Rob Findley 2020-11-03 12:31:40 -05:00
commit 165ceb09f9
641 changed files with 44069 additions and 23186 deletions

View file

@@ -1,3 +1,5 @@
pkg archive/zip, method (*ReadCloser) Open(string) (fs.File, error)
pkg archive/zip, method (*Reader) Open(string) (fs.File, error)
pkg debug/elf, const DT_ADDRRNGHI = 1879047935
pkg debug/elf, const DT_ADDRRNGHI DynTag
pkg debug/elf, const DT_ADDRRNGLO = 1879047680
@@ -216,13 +218,25 @@ pkg debug/elf, const PT_SUNWSTACK = 1879048187
pkg debug/elf, const PT_SUNWSTACK ProgType
pkg debug/elf, const PT_SUNW_EH_FRAME = 1685382480
pkg debug/elf, const PT_SUNW_EH_FRAME ProgType
pkg embed, method (FS) Open(string) (fs.File, error)
pkg embed, method (FS) ReadDir(string) ([]fs.DirEntry, error)
pkg embed, method (FS) ReadFile(string) ([]uint8, error)
pkg embed, type FS struct
pkg flag, func Func(string, string, func(string) error)
pkg flag, method (*FlagSet) Func(string, string, func(string) error)
pkg go/build, type Package struct, EmbedPatterns []string
pkg go/build, type Package struct, IgnoredOtherFiles []string
pkg go/build, type Package struct, TestEmbedPatterns []string
pkg go/build, type Package struct, XTestEmbedPatterns []string
pkg html/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg html/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg io, func NopCloser(Reader) ReadCloser
pkg io, func ReadAll(Reader) ([]uint8, error)
pkg io, type ReadSeekCloser interface { Close, Read, Seek }
pkg io, type ReadSeekCloser interface, Close() error
pkg io, type ReadSeekCloser interface, Read([]uint8) (int, error)
pkg io, type ReadSeekCloser interface, Seek(int64, int) (int64, error)
pkg io, var Discard Writer
pkg io/fs, const ModeAppend = 1073741824
pkg io/fs, const ModeAppend FileMode
pkg io/fs, const ModeCharDevice = 2097152
@@ -253,6 +267,11 @@ pkg io/fs, const ModeTemporary = 268435456
pkg io/fs, const ModeTemporary FileMode
pkg io/fs, const ModeType = 2401763328
pkg io/fs, const ModeType FileMode
pkg io/fs, func Glob(FS, string) ([]string, error)
pkg io/fs, func ReadDir(FS, string) ([]DirEntry, error)
pkg io/fs, func ReadFile(FS, string) ([]uint8, error)
pkg io/fs, func Stat(FS, string) (FileInfo, error)
pkg io/fs, func ValidPath(string) bool
pkg io/fs, method (*PathError) Error() string
pkg io/fs, method (*PathError) Timeout() bool
pkg io/fs, method (*PathError) Unwrap() error
@@ -261,6 +280,17 @@ pkg io/fs, method (FileMode) IsRegular() bool
pkg io/fs, method (FileMode) Perm() FileMode
pkg io/fs, method (FileMode) String() string
pkg io/fs, method (FileMode) Type() FileMode
pkg io/fs, type DirEntry interface { Info, IsDir, Name, Type }
pkg io/fs, type DirEntry interface, Info() (FileInfo, error)
pkg io/fs, type DirEntry interface, IsDir() bool
pkg io/fs, type DirEntry interface, Name() string
pkg io/fs, type DirEntry interface, Type() FileMode
pkg io/fs, type FS interface { Open }
pkg io/fs, type FS interface, Open(string) (File, error)
pkg io/fs, type File interface { Close, Read, Stat }
pkg io/fs, type File interface, Close() error
pkg io/fs, type File interface, Read([]uint8) (int, error)
pkg io/fs, type File interface, Stat() (FileInfo, error)
pkg io/fs, type FileInfo interface { IsDir, ModTime, Mode, Name, Size, Sys }
pkg io/fs, type FileInfo interface, IsDir() bool
pkg io/fs, type FileInfo interface, ModTime() time.Time
@@ -269,16 +299,35 @@ pkg io/fs, type FileInfo interface, Name() string
pkg io/fs, type FileInfo interface, Size() int64
pkg io/fs, type FileInfo interface, Sys() interface{}
pkg io/fs, type FileMode uint32
pkg io/fs, type GlobFS interface { Glob, Open }
pkg io/fs, type GlobFS interface, Glob(string) ([]string, error)
pkg io/fs, type GlobFS interface, Open(string) (File, error)
pkg io/fs, type PathError struct
pkg io/fs, type PathError struct, Err error
pkg io/fs, type PathError struct, Op string
pkg io/fs, type PathError struct, Path string
pkg io/fs, type ReadDirFS interface { Open, ReadDir }
pkg io/fs, type ReadDirFS interface, Open(string) (File, error)
pkg io/fs, type ReadDirFS interface, ReadDir(string) ([]DirEntry, error)
pkg io/fs, type ReadDirFile interface { Close, Read, ReadDir, Stat }
pkg io/fs, type ReadDirFile interface, Close() error
pkg io/fs, type ReadDirFile interface, Read([]uint8) (int, error)
pkg io/fs, type ReadDirFile interface, ReadDir(int) ([]DirEntry, error)
pkg io/fs, type ReadDirFile interface, Stat() (FileInfo, error)
pkg io/fs, type ReadFileFS interface { Open, ReadFile }
pkg io/fs, type ReadFileFS interface, Open(string) (File, error)
pkg io/fs, type ReadFileFS interface, ReadFile(string) ([]uint8, error)
pkg io/fs, type StatFS interface { Open, Stat }
pkg io/fs, type StatFS interface, Open(string) (File, error)
pkg io/fs, type StatFS interface, Stat(string) (FileInfo, error)
pkg io/fs, var ErrClosed error
pkg io/fs, var ErrExist error
pkg io/fs, var ErrInvalid error
pkg io/fs, var ErrNotExist error
pkg io/fs, var ErrPermission error
pkg log, func Default() *Logger
pkg net, var ErrClosed error
pkg net/http, func FS(fs.FS) FileSystem
pkg net/http, type Transport struct, GetProxyConnectHeader func(context.Context, *url.URL, string) (Header, error)
pkg os, const ModeAppend fs.FileMode
pkg os, const ModeCharDevice fs.FileMode
@@ -296,6 +345,7 @@ pkg os, const ModeSymlink fs.FileMode
pkg os, const ModeTemporary fs.FileMode
pkg os, const ModeType fs.FileMode
pkg os, func Chmod(string, fs.FileMode) error
pkg os, func DirFS(string) fs.FS
pkg os, func Lstat(string) (fs.FileInfo, error)
pkg os, func Mkdir(string, fs.FileMode) error
pkg os, func MkdirAll(string, fs.FileMode) error
@@ -303,19 +353,84 @@ pkg os, func OpenFile(string, int, fs.FileMode) (*File, error)
pkg os, func SameFile(fs.FileInfo, fs.FileInfo) bool
pkg os, func Stat(string) (fs.FileInfo, error)
pkg os, method (*File) Chmod(fs.FileMode) error
pkg os, method (*File) ReadDir(int) ([]DirEntry, error)
pkg os, method (*File) ReadDir(int) ([]fs.DirEntry, error)
pkg os, method (*File) Readdir(int) ([]fs.FileInfo, error)
pkg os, method (*File) Stat() (fs.FileInfo, error)
pkg os, type DirEntry interface { Info, IsDir, Name, Type }
pkg os, type DirEntry interface, Info() (fs.FileInfo, error)
pkg os, type DirEntry interface, IsDir() bool
pkg os, type DirEntry interface, Name() string
pkg os, type DirEntry interface, Type() fs.FileMode
pkg os, type DirEntry = fs.DirEntry
pkg os, type FileInfo = fs.FileInfo
pkg os, type FileMode = fs.FileMode
pkg os, type PathError = fs.PathError
pkg os/signal, func NotifyContext(context.Context, ...os.Signal) (context.Context, context.CancelFunc)
pkg runtime/metrics, const KindBad = 0
pkg runtime/metrics, const KindBad ValueKind
pkg runtime/metrics, const KindFloat64 = 2
pkg runtime/metrics, const KindFloat64 ValueKind
pkg runtime/metrics, const KindFloat64Histogram = 3
pkg runtime/metrics, const KindFloat64Histogram ValueKind
pkg runtime/metrics, const KindUint64 = 1
pkg runtime/metrics, const KindUint64 ValueKind
pkg runtime/metrics, func All() []Description
pkg runtime/metrics, func Read([]Sample)
pkg runtime/metrics, method (Value) Float64() float64
pkg runtime/metrics, method (Value) Float64Histogram() *Float64Histogram
pkg runtime/metrics, method (Value) Kind() ValueKind
pkg runtime/metrics, method (Value) Uint64() uint64
pkg runtime/metrics, type Description struct
pkg runtime/metrics, type Description struct, Cumulative bool
pkg runtime/metrics, type Description struct, Description string
pkg runtime/metrics, type Description struct, Kind ValueKind
pkg runtime/metrics, type Description struct, Name string
pkg runtime/metrics, type Description struct, StopTheWorld bool
pkg runtime/metrics, type Float64Histogram struct
pkg runtime/metrics, type Float64Histogram struct, Buckets []float64
pkg runtime/metrics, type Float64Histogram struct, Counts []uint64
pkg runtime/metrics, type Sample struct
pkg runtime/metrics, type Sample struct, Name string
pkg runtime/metrics, type Sample struct, Value Value
pkg runtime/metrics, type Value struct
pkg runtime/metrics, type ValueKind int
pkg syscall (linux-386), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386), func Setegid(int) error
pkg syscall (linux-386), func Seteuid(int) error
pkg syscall (linux-386-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-386-cgo), func Setegid(int) error
pkg syscall (linux-386-cgo), func Seteuid(int) error
pkg syscall (linux-amd64), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64), func Setegid(int) error
pkg syscall (linux-amd64), func Seteuid(int) error
pkg syscall (linux-amd64-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-amd64-cgo), func Setegid(int) error
pkg syscall (linux-amd64-cgo), func Seteuid(int) error
pkg syscall (linux-arm), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm), func Setegid(int) error
pkg syscall (linux-arm), func Seteuid(int) error
pkg syscall (linux-arm-cgo), func AllThreadsSyscall(uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func AllThreadsSyscall6(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno)
pkg syscall (linux-arm-cgo), func Setegid(int) error
pkg syscall (linux-arm-cgo), func Seteuid(int) error
pkg syscall (windows-386), func RtlGenRandom(*uint8, uint32) error
pkg syscall (windows-amd64), func RtlGenRandom(*uint8, uint32) error
pkg testing/fstest, func TestFS(fs.FS, ...string) error
pkg testing/fstest, method (MapFS) Glob(string) ([]string, error)
pkg testing/fstest, method (MapFS) Open(string) (fs.File, error)
pkg testing/fstest, method (MapFS) ReadDir(string) ([]fs.DirEntry, error)
pkg testing/fstest, method (MapFS) ReadFile(string) ([]uint8, error)
pkg testing/fstest, method (MapFS) Stat(string) (fs.FileInfo, error)
pkg testing/fstest, type MapFS map[string]*MapFile
pkg testing/fstest, type MapFile struct
pkg testing/fstest, type MapFile struct, Data []uint8
pkg testing/fstest, type MapFile struct, ModTime time.Time
pkg testing/fstest, type MapFile struct, Mode fs.FileMode
pkg testing/fstest, type MapFile struct, Sys interface{}
pkg testing/iotest, func ErrReader(error) io.Reader
pkg testing/iotest, func TestReader(io.Reader, []uint8) error
pkg text/template, func ParseFS(fs.FS, ...string) (*Template, error)
pkg text/template, method (*Template) ParseFS(fs.FS, ...string) (*Template, error)
pkg text/template/parse, const NodeComment = 20
pkg text/template/parse, const NodeComment NodeType
pkg text/template/parse, const ParseComments = 1
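For context: a minimal sketch (not part of this commit) of how the new io/fs, os.DirFS, and testing/fstest entry points listed above compose, assuming Go 1.16 semantics:

package main

import (
	"fmt"
	"io/fs"
	"log"
	"os"
	"testing/fstest"
)

func main() {
	// An in-memory file system; fstest.MapFS is a map[string]*MapFile.
	fsys := fstest.MapFS{
		"hello.txt":    {Data: []byte("hello, world\n")},
		"sub/data.txt": {Data: []byte("42\n")},
	}

	// fs.ReadFile accepts any fs.FS implementation, including MapFS.
	b, err := fs.ReadFile(fsys, "hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(b))

	// os.DirFS adapts a real directory tree to the same fs.FS interface.
	entries, err := fs.ReadDir(os.DirFS("."), ".")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}
}

Similarly, a hedged sketch of the new runtime/metrics sampling loop; the set of metric names varies by Go version, so it is discovered via All rather than hard-coded:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Discover all supported metrics, then sample them in one Read call.
	descs := metrics.All()
	samples := make([]metrics.Sample, len(descs))
	for i, d := range descs {
		samples[i].Name = d.Name
	}
	metrics.Read(samples)
	for _, s := range samples {
		switch s.Value.Kind() {
		case metrics.KindUint64:
			fmt.Printf("%s: %d\n", s.Name, s.Value.Uint64())
		case metrics.KindFloat64:
			fmt.Printf("%s: %f\n", s.Name, s.Value.Float64())
		}
	}
}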

View file

@@ -947,10 +947,18 @@ The Gerrit voting system involves an integer in the range -2 to +2:
</li>
</ul>
<p>
At least two maintainers must approve of the change, and at least one
of those maintainers must +2 the change.
The second maintainer may cast a vote of Trust+1, meaning that the
change looks basically OK, but that the maintainer hasn't done the
detailed review required for a +2 vote.
</p>
<h3 id="submit">Submitting an approved change</h3>
<p>
After the code has been +2'ed, an approver will
After the code has been +2'ed and Trust+1'ed, an approver will
apply it to the master branch using the Gerrit user interface.
This is called "submitting the change".
</p>

View file

@@ -131,6 +131,16 @@ Do not send CLs removing the interior tags from such phrases.
being built.
</p>
<h4 id="list-buildid">The <code>list</code> command</h4>
<p><!-- golang.org/cl/263542 -->
When the <code>-export</code> flag is specified, the <code>BuildID</code>
field is now set to the build ID of the compiled package. This is equivalent
to running <code>go</code> <code>tool</code> <code>buildid</code> on
<code>go</code> <code>list</code> <code>-export</code> <code>-f</code> <code>{{.Export}}</code>,
but without the extra step.
</p>
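A small check of that equivalence (an illustration, not part of the release note), assuming a Go 1.16 toolchain on PATH; the target package fmt is arbitrary:

package main

import (
	"bytes"
	"fmt"
	"log"
	"os/exec"
)

func run(args ...string) []byte {
	out, err := exec.Command("go", args...).Output()
	if err != nil {
		log.Fatal(err)
	}
	return bytes.TrimSpace(out)
}

func main() {
	// New in Go 1.16: BuildID is populated directly when -export is set.
	direct := run("list", "-export", "-f", "{{.BuildID}}", "fmt")

	// The older two-step route: locate the export file, then hash it.
	exportFile := run("list", "-export", "-f", "{{.Export}}", "fmt")
	viaTool := run("tool", "buildid", string(exportFile))

	fmt.Println(bytes.Equal(direct, viaTool)) // expected: true
}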
<h3 id="cgo">Cgo</h3>
<p> <!-- CL 252378 -->
@@ -151,6 +161,18 @@ Do not send CLs removing the interior tags from such phrases.
TODO
</p>
<p><!-- CL 267100 -->
On Linux, the runtime now defaults to releasing memory to the
operating system promptly (using <code>MADV_DONTNEED</code>), rather
than lazily when the operating system is under memory pressure
(using <code>MADV_FREE</code>). This means process-level memory
statistics like RSS will more accurately reflect the amount of
physical memory being used by Go processes. Systems that are
currently using <code>GODEBUG=madvdontneed=1</code> to improve
memory monitoring behavior no longer need to set this environment
variable.
</p>
<h2 id="compiler">Compiler</h2>
<p>
@@ -182,7 +204,10 @@ Do not send CLs removing the interior tags from such phrases.
TODO: update with final numbers later in the release.
</p>
<!-- CL 255259: https://golang.org/cl/255259: cmd/link: enable ASLR on windows binaries built with -buildmode=c-shared -->
<p> <!-- CL 255259 -->
On Windows, <code>go build -buildmode=c-shared</code> now generates Windows
ASLR DLLs by default. ASLR can be disabled with <code>--ldflags=-aslr=false</code>.
</p>
<h2 id="library">Core library</h2>
@@ -251,15 +276,6 @@ Do not send CLs removing the interior tags from such phrases.
On Linux kernel version 4.1 and above, the maximum is now <code>4294967295</code>.
</p>
<h3 id="reflect"><a href="/pkg/reflect/">reflect</a></h3>
<p><!-- CL 259237, golang.org/issue/22075 -->
For interface types and values, <a href="/pkg/reflect/#Value.Method">Method</a>,
<a href="/pkg/reflect/#Value.MethodByName">MethodByName</a>, and
<a href="/pkg/reflect/#Value.NumMethod">NumMethod</a> now
operate on the interface's exported method set, rather than its full method set.
</p>
<h3 id="text/template/parse"><a href="/pkg/text/template/parse/">text/template/parse</a></h3>
<p><!-- CL 229398, golang.org/issue/34652 -->

View file

@@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of Sep 24, 2020",
"Subtitle": "Version of Oct 7, 2020",
"Path": "/ref/spec"
}-->
@@ -3594,23 +3594,33 @@ replaced by its left operand alone.
</p>
<pre>
var a [1024]byte
var s uint = 33
// The results of the following examples are given for 64-bit ints.
var i = 1&lt;&lt;s // 1 has type int
var j int32 = 1&lt;&lt;s // 1 has type int32; j == 0
var k = uint64(1&lt;&lt;s) // 1 has type uint64; k == 1&lt;&lt;33
var m int = 1.0&lt;&lt;s // 1.0 has type int; m == 0 if ints are 32bits in size
var n = 1.0&lt;&lt;s == j // 1.0 has type int32; n == true
var o = 1&lt;&lt;s == 2&lt;&lt;s // 1 and 2 have type int; o == true if ints are 32bits in size
var p = 1&lt;&lt;s == 1&lt;&lt;33 // illegal if ints are 32bits in size: 1 has type int, but 1&lt;&lt;33 overflows int
var m int = 1.0&lt;&lt;s // 1.0 has type int; m == 1&lt;&lt;33
var n = 1.0&lt;&lt;s == j // 1.0 has type int32; n == true
var o = 1&lt;&lt;s == 2&lt;&lt;s // 1 and 2 have type int; o == false
var p = 1&lt;&lt;s == 1&lt;&lt;33 // 1 has type int; p == true
var u = 1.0&lt;&lt;s // illegal: 1.0 has type float64, cannot shift
var u1 = 1.0&lt;&lt;s != 0 // illegal: 1.0 has type float64, cannot shift
var u2 = 1&lt;&lt;s != 1.0 // illegal: 1 has type float64, cannot shift
var v float32 = 1&lt;&lt;s // illegal: 1 has type float32, cannot shift
var w int64 = 1.0&lt;&lt;33 // 1.0&lt;&lt;33 is a constant shift expression
var x = a[1.0&lt;&lt;s] // 1.0 has type int; x == a[0] if ints are 32bits in size
var a = make([]byte, 1.0&lt;&lt;s) // 1.0 has type int; len(a) == 0 if ints are 32bits in size
</pre>
var w int64 = 1.0&lt;&lt;33 // 1.0&lt;&lt;33 is a constant shift expression; w == 1&lt;&lt;33
var x = a[1.0&lt;&lt;s] // panics: 1.0 has type int, but 1&lt;&lt;33 overflows array bounds
var b = make([]byte, 1.0&lt;&lt;s) // 1.0 has type int; len(b) == 1&lt;&lt;33
// The results of the following examples are given for 32-bit ints,
// which means the shifts will overflow.
var mm int = 1.0&lt;&lt;s // 1.0 has type int; mm == 0
var oo = 1&lt;&lt;s == 2&lt;&lt;s // 1 and 2 have type int; oo == true
var pp = 1&lt;&lt;s == 1&lt;&lt;33 // illegal: 1 has type int, but 1&lt;&lt;33 overflows int
var xx = a[1.0&lt;&lt;s] // 1.0 has type int; xx == a[0]
var bb = make([]byte, 1.0&lt;&lt;s) // 1.0 has type int; len(bb) == 0
</pre>
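As an aside (not part of the spec diff), the updated 64-bit comments are easy to verify with an ordinary program; a minimal sketch, assuming an implementation whose int is 64 bits wide:

package main

import "fmt"

var s uint = 33

func main() {
	var m int = 1.0 << s // 1.0 is an untyped constant; here it takes type int
	o := 1<<s == 2<<s    // 1 and 2 both take type int
	p := 1<<s == 1<<33   // legal with 64-bit ints
	fmt.Println(m == 1<<33, o, p) // prints: true false true
}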
<h4 id="Operator_precedence">Operator precedence</h4>
<p>

View file

@@ -8,8 +8,8 @@
# Consult https://www.iana.org/time-zones for the latest versions.
# Versions to use.
CODE=2020b
DATA=2020b
CODE=2020d
DATA=2020d
set -e
rm -rf work

Binary file not shown.

View file

@@ -181,7 +181,7 @@ func testCallbackCallers(t *testing.T) {
name := []string{
"runtime.cgocallbackg1",
"runtime.cgocallbackg",
"runtime.cgocallback_gofunc",
"runtime.cgocallback",
"runtime.asmcgocall",
"runtime.cgocall",
"test._Cfunc_callback",

View file

@@ -15,5 +15,6 @@ func TestSetgid(t *testing.T) {
}
testSetgid(t)
}
func Test1435(t *testing.T) { test1435(t) }
func Test6997(t *testing.T) { test6997(t) }
func TestBuildID(t *testing.T) { testBuildID(t) }

View file

@@ -76,6 +76,8 @@ func TestCheckConst(t *testing.T) { testCheckConst(t) }
func TestConst(t *testing.T) { testConst(t) }
func TestCthread(t *testing.T) { testCthread(t) }
func TestEnum(t *testing.T) { testEnum(t) }
func TestNamedEnum(t *testing.T) { testNamedEnum(t) }
func TestCastToEnum(t *testing.T) { testCastToEnum(t) }
func TestErrno(t *testing.T) { testErrno(t) }
func TestFpVar(t *testing.T) { testFpVar(t) }
func TestHelpers(t *testing.T) { testHelpers(t) }

misc/cgo/test/issue1435.go (new file, 152 lines)
View file

@@ -0,0 +1,152 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,cgo
package cgotest
import (
"fmt"
"io/ioutil"
"strings"
"syscall"
"testing"
)
// #include <stdio.h>
// #include <stdlib.h>
// #include <pthread.h>
// #include <unistd.h>
// #include <sys/types.h>
//
// pthread_t *t = NULL;
// pthread_mutex_t mu;
// int nts = 0;
// int all_done = 0;
//
// static void *aFn(void *vargp) {
// int done = 0;
// while (!done) {
// usleep(100);
// pthread_mutex_lock(&mu);
// done = all_done;
// pthread_mutex_unlock(&mu);
// }
// return NULL;
// }
//
// void trial(int argc) {
// int i;
// nts = argc;
// t = calloc(nts, sizeof(pthread_t));
// pthread_mutex_init(&mu, NULL);
// for (i = 0; i < nts; i++) {
// pthread_create(&t[i], NULL, aFn, NULL);
// }
// }
//
// void cleanup(void) {
// int i;
// pthread_mutex_lock(&mu);
// all_done = 1;
// pthread_mutex_unlock(&mu);
// for (i = 0; i < nts; i++) {
// pthread_join(t[i], NULL);
// }
// pthread_mutex_destroy(&mu);
// free(t);
// }
import "C"
// compareStatus is used to confirm the contents of the thread
// specific status files match expectations.
func compareStatus(filter, expect string) error {
expected := filter + "\t" + expect
pid := syscall.Getpid()
fs, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/task", pid))
if err != nil {
return fmt.Errorf("unable to find %d tasks: %v", pid, err)
}
for _, f := range fs {
tf := fmt.Sprintf("/proc/%s/status", f.Name())
d, err := ioutil.ReadFile(tf)
if err != nil {
return fmt.Errorf("unable to read %q: %v", tf, err)
}
lines := strings.Split(string(d), "\n")
for _, line := range lines {
if strings.HasPrefix(line, filter) {
if line != expected {
return fmt.Errorf("%s %s (bad)\n", tf, line)
}
break
}
}
}
return nil
}
// test1435 tests that the 9 glibc-implemented setuid/gid syscall functions
// are mapped. This test is a slightly more expansive test than that of
// src/syscall/syscall_linux_test.go:TestSetuidEtc() insofar as it
// launches concurrent threads from C code via CGo and validates that
// they are subject to the system calls being tested. For the actual
// Go functionality being tested here, the syscall_linux_test version
// is considered authoritative, but non-trivial improvements to that
// should be mirrored here.
func test1435(t *testing.T) {
if syscall.Getuid() != 0 {
t.Skip("skipping root only test")
}
// Launch some threads in C.
const cts = 5
C.trial(cts)
defer C.cleanup()
vs := []struct {
call string
fn func() error
filter, expect string
}{
{call: "Setegid(1)", fn: func() error { return syscall.Setegid(1) }, filter: "Gid:", expect: "0\t1\t0\t1"},
{call: "Setegid(0)", fn: func() error { return syscall.Setegid(0) }, filter: "Gid:", expect: "0\t0\t0\t0"},
{call: "Seteuid(1)", fn: func() error { return syscall.Seteuid(1) }, filter: "Uid:", expect: "0\t1\t0\t1"},
{call: "Setuid(0)", fn: func() error { return syscall.Setuid(0) }, filter: "Uid:", expect: "0\t0\t0\t0"},
{call: "Setgid(1)", fn: func() error { return syscall.Setgid(1) }, filter: "Gid:", expect: "1\t1\t1\t1"},
{call: "Setgid(0)", fn: func() error { return syscall.Setgid(0) }, filter: "Gid:", expect: "0\t0\t0\t0"},
{call: "Setgroups([]int{0,1,2,3})", fn: func() error { return syscall.Setgroups([]int{0, 1, 2, 3}) }, filter: "Groups:", expect: "0 1 2 3 "},
{call: "Setgroups(nil)", fn: func() error { return syscall.Setgroups(nil) }, filter: "Groups:", expect: " "},
{call: "Setgroups([]int{0})", fn: func() error { return syscall.Setgroups([]int{0}) }, filter: "Groups:", expect: "0 "},
{call: "Setregid(101,0)", fn: func() error { return syscall.Setregid(101, 0) }, filter: "Gid:", expect: "101\t0\t0\t0"},
{call: "Setregid(0,102)", fn: func() error { return syscall.Setregid(0, 102) }, filter: "Gid:", expect: "0\t102\t102\t102"},
{call: "Setregid(0,0)", fn: func() error { return syscall.Setregid(0, 0) }, filter: "Gid:", expect: "0\t0\t0\t0"},
{call: "Setreuid(1,0)", fn: func() error { return syscall.Setreuid(1, 0) }, filter: "Uid:", expect: "1\t0\t0\t0"},
{call: "Setreuid(0,2)", fn: func() error { return syscall.Setreuid(0, 2) }, filter: "Uid:", expect: "0\t2\t2\t2"},
{call: "Setreuid(0,0)", fn: func() error { return syscall.Setreuid(0, 0) }, filter: "Uid:", expect: "0\t0\t0\t0"},
{call: "Setresgid(101,0,102)", fn: func() error { return syscall.Setresgid(101, 0, 102) }, filter: "Gid:", expect: "101\t0\t102\t0"},
{call: "Setresgid(0,102,101)", fn: func() error { return syscall.Setresgid(0, 102, 101) }, filter: "Gid:", expect: "0\t102\t101\t102"},
{call: "Setresgid(0,0,0)", fn: func() error { return syscall.Setresgid(0, 0, 0) }, filter: "Gid:", expect: "0\t0\t0\t0"},
{call: "Setresuid(1,0,2)", fn: func() error { return syscall.Setresuid(1, 0, 2) }, filter: "Uid:", expect: "1\t0\t2\t0"},
{call: "Setresuid(0,2,1)", fn: func() error { return syscall.Setresuid(0, 2, 1) }, filter: "Uid:", expect: "0\t2\t1\t2"},
{call: "Setresuid(0,0,0)", fn: func() error { return syscall.Setresuid(0, 0, 0) }, filter: "Uid:", expect: "0\t0\t0\t0"},
}
for i, v := range vs {
if err := v.fn(); err != nil {
t.Errorf("[%d] %q failed: %v", i, v.call, err)
continue
}
if err := compareStatus(v.filter, v.expect); err != nil {
t.Errorf("[%d] %q comparison: %v", i, v.call, err)
}
}
}
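An aside, not part of this file: the process-wide behavior validated above is built on the new syscall.AllThreadsSyscall (see the api listing), which runs one raw system call on every thread managed by the Go runtime. A hedged, linux-only sketch of calling it directly; the prctl/PR_SET_KEEPCAPS example is illustrative rather than taken from this commit:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Apply prctl(PR_SET_KEEPCAPS, 0) on all runtime threads at once.
	r1, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0)
	if errno != 0 {
		fmt.Println("prctl failed:", errno)
		return
	}
	fmt.Println("prctl returned", r1, "on all threads")
}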

View file

@@ -1000,6 +1000,32 @@ func testEnum(t *testing.T) {
}
}
func testNamedEnum(t *testing.T) {
e := new(C.enum_E)
*e = C.Enum1
if *e != 1 {
t.Error("bad enum", C.Enum1)
}
*e = C.Enum2
if *e != 2 {
t.Error("bad enum", C.Enum2)
}
}
func testCastToEnum(t *testing.T) {
e := C.enum_E(C.Enum1)
if e != 1 {
t.Error("bad enum", C.Enum1)
}
e = C.enum_E(C.Enum2)
if e != 2 {
t.Error("bad enum", C.Enum2)
}
}
func testAtol(t *testing.T) {
l := Atol("123")
if l != 123 {

View file

@@ -0,0 +1,31 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build riscv64
// +build !gccgo
#include "textflag.h"
TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0
// Rewind stack pointer so anything that happens on the stack
// will clobber the test pattern created by the caller
ADD $(1024*8), X2
// Ask signaller to setgid
MOV $1, X5
FENCE
MOVW X5, ·Baton(SB)
FENCE
// Wait for setgid completion
loop:
FENCE
MOVW ·Baton(SB), X5
OR X6, X6, X6 // hint that we're in a spin loop
BNE ZERO, X5, loop
FENCE
// Restore stack
ADD $(-1024*8), X2
RET

View file

@@ -7,6 +7,8 @@ package cshared_test
import (
"bytes"
"debug/elf"
"debug/pe"
"encoding/binary"
"flag"
"fmt"
"io/ioutil"
@@ -355,6 +357,101 @@ func TestExportedSymbols(t *testing.T) {
}
}
func checkNumberOfExportedFunctionsWindows(t *testing.T, exportAllSymbols bool) {
const prog = `
package main
import "C"
//export GoFunc
func GoFunc() {
println(42)
}
//export GoFunc2
func GoFunc2() {
println(24)
}
func main() {
}
`
tmpdir := t.TempDir()
srcfile := filepath.Join(tmpdir, "test.go")
objfile := filepath.Join(tmpdir, "test.dll")
if err := ioutil.WriteFile(srcfile, []byte(prog), 0666); err != nil {
t.Fatal(err)
}
argv := []string{"build", "-buildmode=c-shared"}
if exportAllSymbols {
argv = append(argv, "-ldflags", "-extldflags=-Wl,--export-all-symbols")
}
argv = append(argv, "-o", objfile, srcfile)
out, err := exec.Command("go", argv...).CombinedOutput()
if err != nil {
t.Fatalf("build failure: %s\n%s\n", err, string(out))
}
f, err := pe.Open(objfile)
if err != nil {
t.Fatalf("pe.Open failed: %v", err)
}
defer f.Close()
section := f.Section(".edata")
if section == nil {
t.Fatalf(".edata section is not present")
}
// TODO: deduplicate this struct from cmd/link/internal/ld/pe.go
type IMAGE_EXPORT_DIRECTORY struct {
_ [2]uint32
_ [2]uint16
_ [2]uint32
NumberOfFunctions uint32
NumberOfNames uint32
_ [3]uint32
}
var e IMAGE_EXPORT_DIRECTORY
if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil {
t.Fatalf("binary.Read failed: %v", err)
}
// Only the two exported functions and _cgo_dummy_export should be exported
expectedNumber := uint32(3)
if exportAllSymbols {
if e.NumberOfFunctions <= expectedNumber {
t.Fatalf("missing exported functions: %v", e.NumberOfFunctions)
}
if e.NumberOfNames <= expectedNumber {
t.Fatalf("missing exported names: %v", e.NumberOfNames)
}
} else {
if e.NumberOfFunctions != expectedNumber {
t.Fatalf("got %d exported functions; want %d", e.NumberOfFunctions, expectedNumber)
}
if e.NumberOfNames != expectedNumber {
t.Fatalf("got %d exported names; want %d", e.NumberOfNames, expectedNumber)
}
}
}
func TestNumberOfExportedFunctions(t *testing.T) {
if GOOS != "windows" {
t.Skip("skipping windows only test")
}
t.Parallel()
t.Run("OnlyExported", func(t *testing.T) {
checkNumberOfExportedFunctionsWindows(t, false)
})
t.Run("All", func(t *testing.T) {
checkNumberOfExportedFunctionsWindows(t, true)
})
}
// test1: shared library can be dynamically loaded and exported symbols are accessible.
func TestExportedSymbolsWithDynamicLoad(t *testing.T) {
t.Parallel()

View file

@@ -503,6 +503,9 @@
}
async run(instance) {
if (!(instance instanceof WebAssembly.Instance)) {
throw new Error("Go.run: WebAssembly.Instance expected");
}
this._inst = instance;
this.mem = new DataView(this._inst.exports.mem.buffer);
this._values = [ // JS values that Go currently has references to, indexed by reference id

View file

@@ -535,6 +535,9 @@ func archRISCV64() *Arch {
// Standard register names.
for i := riscv.REG_X0; i <= riscv.REG_X31; i++ {
if i == riscv.REG_G {
continue
}
name := fmt.Sprintf("X%d", i-riscv.REG_X0)
register[name] = int16(i)
}
@@ -571,7 +574,7 @@ func archRISCV64() *Arch {
register["S8"] = riscv.REG_S8
register["S9"] = riscv.REG_S9
register["S10"] = riscv.REG_S10
register["S11"] = riscv.REG_S11
// Skip S11 as it is the g register.
register["T3"] = riscv.REG_T3
register["T4"] = riscv.REG_T4
register["T5"] = riscv.REG_T5

View file

@@ -75,7 +75,7 @@ func IsARM64STLXR(op obj.As) bool {
arm64.ASTXP, arm64.ASTXPW, arm64.ASTLXP, arm64.ASTLXPW:
return true
}
// atomic instructions
// LDADDx/SWPx/CASx atomic instructions
if arm64.IsAtomicInstruction(op) {
return true
}
@@ -93,6 +93,17 @@ func IsARM64TBL(op obj.As) bool {
return false
}
// IsARM64CASP reports whether the op (as defined by an arm64.A*
// constant) is one of the CASP-like instructions, and its 2nd
// destination is a register pair that requires special handling.
func IsARM64CASP(op obj.As) bool {
switch op {
case arm64.ACASPD, arm64.ACASPW:
return true
}
return false
}
// ARM64Suffix handles the special suffix for the ARM64.
// It returns a boolean to indicate success; failure means
// cond was unrecognized.

View file

@@ -637,6 +637,18 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
prog.From = a[0]
prog.SetFrom3(a[1])
prog.To = a[2]
case arch.IsARM64CASP(op):
prog.From = a[0]
prog.To = a[1]
// Both the 1st operand and the 3rd operand are (Rs, Rs+1) register pairs,
// and each register pair must be contiguous.
if (a[0].Type != obj.TYPE_REGREG) || (a[2].Type != obj.TYPE_REGREG) {
p.errorf("invalid addressing modes for 1st or 3rd operand to %s instruction, must be register pair", op)
return
}
// For ARM64 CASP-like instructions, the 2nd destination operand is a register pair (Rt, Rt+1) that
// cannot fit into prog.RegTo2, so save it to prog.RestArgs.
prog.SetTo2(a[2])
default:
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
@@ -725,7 +737,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
}
if p.arch.Family == sys.AMD64 {
prog.From = a[0]
prog.RestArgs = []obj.Addr{a[1], a[2]}
prog.SetRestArgs([]obj.Addr{a[1], a[2]})
prog.To = a[3]
break
}
@@ -808,13 +820,13 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
}
if p.arch.Family == sys.AMD64 {
prog.From = a[0]
prog.RestArgs = []obj.Addr{a[1], a[2], a[3]}
prog.SetRestArgs([]obj.Addr{a[1], a[2], a[3]})
prog.To = a[4]
break
}
if p.arch.Family == sys.S390X {
prog.From = a[0]
prog.RestArgs = []obj.Addr{a[1], a[2], a[3]}
prog.SetRestArgs([]obj.Addr{a[1], a[2], a[3]})
prog.To = a[4]
break
}

View file

@@ -390,12 +390,7 @@ func TestARM64Errors(t *testing.T) {
}
func TestAMD64EndToEnd(t *testing.T) {
defer func(old string) { objabi.GOAMD64 = old }(objabi.GOAMD64)
for _, goamd64 := range []string{"normaljumps", "alignedjumps"} {
t.Logf("GOAMD64=%s", goamd64)
objabi.GOAMD64 = goamd64
testEndToEnd(t, "amd64", "amd64")
}
}
func Test386Encoder(t *testing.T) {

View file

@@ -10,14 +10,8 @@
TEXT foo(SB), DUPOK|NOSPLIT, $-8
//
// ADD
//
// LTYPE1 imsr ',' spreg ',' reg
// {
// outcode($1, &$2, $4, &$6);
// }
// imsr comes from the old 7a, we only support immediates and registers
// arithmetic operations
ADDW $1, R2, R3
ADDW R1, R2, R3
ADDW R1, ZR, R3
@@ -25,6 +19,13 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
ADD R1, R2, R3
ADD R1, ZR, R3
ADD $1, R2, R3
ADDW $1, R2
ADDW R1, R2
ADD $1, R2
ADD R1, R2
ADD R1>>11, R2
ADD R1<<22, R2
ADD R1->33, R2
ADD $0x000aaa, R2, R3 // ADD $2730, R2, R3 // 43a82a91
ADD $0x000aaa, R2 // ADD $2730, R2 // 42a82a91
ADD $0xaaa000, R2, R3 // ADD $11182080, R2, R3 // 43a86a91
@@ -37,6 +38,10 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
SUB $0xaaa000, R2 // SUB $11182080, R2 // 42a86ad1
SUB $0xaaaaaa, R2, R3 // SUB $11184810, R2, R3 // 43a82ad163a86ad1
SUB $0xaaaaaa, R2 // SUB $11184810, R2 // 42a82ad142a86ad1
ADDW $0x60060, R2 // ADDW $393312, R2 // 4280011142804111
ADD $0x186a0, R2, R5 // ADD $100000, R2, R5 // 45801a91a5604091
SUB $0xe7791f700, R3, R1 // SUB $62135596800, R3, R1 // 1be09ed23bf2aef2db01c0f261001bcb
ADD $0x3fffffffc000, R5 // ADD $70368744161280, R5 // fb7f72b2a5001b8b
ADD R1>>11, R2, R3
ADD R1<<22, R2, R3
ADD R1->33, R2, R3
@@ -59,6 +64,30 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
CMN R1.SXTX<<2, R10 // 5fe921ab
CMPW R2.UXTH<<3, R11 // 7f2d226b
CMNW R1.SXTB, R9 // 3f81212b
CMPW $0x60060, R2 // CMPW $393312, R2 // 1b0c8052db00a0725f001b6b
CMPW $40960, R0 // 1f284071
CMPW $27745, R2 // 3b8c8d525f001b6b
CMNW $0x3fffffc0, R2 // CMNW $1073741760, R2 // fb5f1a325f001b2b
CMPW $0xffff0, R1 // CMPW $1048560, R1 // fb3f1c323f001b6b
CMP $0xffffffffffa0, R3 // CMP $281474976710560, R3 // fb0b80921b00e0f27f001beb
CMP $0xf4240, R1 // CMP $1000000, R1 // 1b4888d2fb01a0f23f001beb
CMP $3343198598084851058, R3 // 5bae8ed2db8daef23badcdf2bbcce5f27f001beb
CMP $3, R2
CMP R1, R2
CMP R1->11, R2
CMP R1>>22, R2
CMP R1<<33, R2
CMP R22.SXTX, RSP // ffe336eb
CMP $0x22220000, RSP // CMP $572653568, RSP // 5b44a4d2ff633beb
CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff633b6b
CCMN MI, ZR, R1, $4 // e44341ba
// MADD Rn,Rm,Ra,Rd
MADD R1, R2, R3, R4 // 6408019b
// CLS
CLSW R1, R2
CLS R1, R2
// fp/simd instructions.
VADDP V1.B16, V2.B16, V3.B16 // 43bc214e
VADDP V1.S4, V2.S4, V3.S4 // 43bca14e
VADDP V1.D2, V2.D2, V3.D2 // 43bce14e
@@ -67,22 +96,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VORR V5.B16, V4.B16, V3.B16 // 831ca54e
VADD V16.S4, V5.S4, V9.S4 // a984b04e
VEOR V0.B16, V1.B16, V0.B16 // 201c206e
SHA256H V9.S4, V3, V2 // 6240095e
SHA256H2 V9.S4, V4, V3 // 8350095e
SHA256SU0 V8.S4, V7.S4 // 0729285e
SHA256SU1 V6.S4, V5.S4, V7.S4 // a760065e
SHA1SU0 V11.S4, V8.S4, V6.S4 // 06310b5e
SHA1SU1 V5.S4, V1.S4 // a118285e
SHA1C V1.S4, V2, V3 // 4300015e
SHA1H V5, V4 // a408285e
SHA1M V8.S4, V7, V6 // e620085e
SHA1P V11.S4, V10, V9 // 49110b5e
SHA512H V2.D2, V1, V0 // 208062ce
SHA512H2 V4.D2, V3, V2 // 628464ce
SHA512SU0 V9.D2, V8.D2 // 2881c0ce
SHA512SU1 V7.D2, V6.D2, V5.D2 // c58867ce
VRAX1 V26.D2, V29.D2, V30.D2 // be8f7ace
VXAR $63, V27.D2, V21.D2, V26.D2 // bafe9bce
VADDV V0.S4, V0 // 00b8b14e
VMOVI $82, V0.B16 // 40e6024f
VUADDLV V6.B16, V6 // c638306e
@@ -96,10 +109,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VFMLS V1.D2, V12.D2, V1.D2 // 81cde14e
VFMLS V1.S2, V12.S2, V1.S2 // 81cda10e
VFMLS V1.S4, V12.S4, V1.S4 // 81cda14e
VPMULL V2.D1, V1.D1, V3.Q1 // 23e0e20e
VPMULL2 V2.D2, V1.D2, V4.Q1 // 24e0e24e
VPMULL V2.B8, V1.B8, V3.H8 // 23e0220e
VPMULL2 V2.B16, V1.B16, V4.H8 // 24e0224e
VEXT $4, V2.B8, V1.B8, V3.B8 // 2320022e
VEXT $8, V2.B16, V1.B16, V3.B16 // 2340026e
VRBIT V24.B16, V24.B16 // 185b606e
@@ -125,6 +134,14 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VSRI $8, V1.H8, V2.H8 // 2244186f
VSRI $2, V1.B8, V2.B8 // 22440e2f
VSRI $2, V1.B16, V2.B16 // 22440e6f
VSLI $7, V2.B16, V3.B16 // 43540f6f
VSLI $15, V3.H4, V4.H4 // 64541f2f
VSLI $31, V5.S4, V6.S4 // a6543f6f
VSLI $63, V7.D2, V8.D2 // e8547f6f
VUSRA $8, V2.B16, V3.B16 // 4314086f
VUSRA $16, V3.H4, V4.H4 // 6414102f
VUSRA $32, V5.S4, V6.S4 // a614206f
VUSRA $64, V7.D2, V8.D2 // e814406f
VTBL V22.B16, [V28.B16, V29.B16], V11.B16 // 8b23164e
VTBL V18.B8, [V17.B16, V18.B16, V19.B16], V22.B8 // 3642120e
VTBL V31.B8, [V14.B16, V15.B16, V16.B16, V17.B16], V15.B8 // cf611f0e
@@ -141,8 +158,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VTBL V14.B16, [V3.B16, V4.B16, V5.B16], V17.B16 // 71400e4e
VTBL V13.B16, [V29.B16, V30.B16, V31.B16, V0.B16], V28.B16 // bc630d4e
VTBL V3.B8, [V27.B16], V8.B8 // 6803030e
VEOR3 V2.B16, V7.B16, V12.B16, V25.B16 // 990907ce
VBCAX V1.B16, V2.B16, V26.B16, V31.B16 // 5f0722ce
VZIP1 V16.H8, V3.H8, V19.H8 // 7338504e
VZIP2 V22.D2, V25.D2, V21.D2 // 357bd64e
VZIP1 V6.D2, V9.D2, V11.D2 // 2b39c64e
@@ -180,105 +195,87 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VUSHLL2 $31, V30.S4, V2.D2 // c2a73f6f
VBIF V0.B8, V30.B8, V1.B8 // c11fe02e
VBIF V30.B16, V0.B16, V2.B16 // 021cfe6e
MOVD (R2)(R6.SXTW), R4 // 44c866f8
MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8
MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8
MOVWU (R19)(R20<<2), R20 // 747a74b8
MOVD (R2)(R6<<3), R4 // 447866f8
MOVD (R3)(R7.SXTX<<3), R8 // 68f867f8
MOVWU (R5)(R4.UXTW), R10 // aa4864b8
MOVBU (R3)(R9.UXTW), R8 // 68486938
MOVBU (R5)(R8), R10 // MOVBU (R5)(R8*1), R10 // aa686838
MOVHU (R2)(R7.SXTW<<1), R11 // 4bd86778
MOVHU (R1)(R2<<1), R5 // 25786278
MOVB (R9)(R3.UXTW), R6 // 2649a338
MOVB (R10)(R6), R15 // MOVB (R10)(R6*1), R15 // 4f69a638
MOVH (R5)(R7.SXTX<<1), R19 // b3f8a778
MOVH (R8)(R4<<1), R10 // 0a79a478
MOVW (R9)(R8.SXTW<<2), R19 // 33d9a8b8
MOVW (R1)(R4.SXTX), R11 // 2be8a4b8
MOVW (R1)(R4.SXTX), ZR // 3fe8a4b8
MOVW (R2)(R5), R12 // MOVW (R2)(R5*1), R12 // 4c68a5b8
MOVD R5, (R2)(R6<<3) // 457826f8
MOVD R9, (R6)(R7.SXTX<<3) // c9f827f8
MOVD ZR, (R6)(R7.SXTX<<3) // dff827f8
MOVW R8, (R2)(R3.UXTW<<2) // 485823b8
MOVW R7, (R3)(R4.SXTW) // 67c824b8
MOVB R4, (R2)(R6.SXTX) // 44e82638
MOVB R8, (R3)(R9.UXTW) // 68482938
MOVB R10, (R5)(R8) // MOVB R10, (R5)(R8*1) // aa682838
MOVH R11, (R2)(R7.SXTW<<1) // 4bd82778
MOVH R5, (R1)(R2<<1) // 25782278
MOVH R7, (R2)(R5.SXTX<<1) // 47f82578
MOVH R8, (R3)(R6.UXTW) // 68482678
MOVB (R29)(R30<<0), R14 // ae7bbe38
MOVB (R29)(R30), R14 // MOVB (R29)(R30*1), R14 // ae6bbe38
MOVB R4, (R2)(R6.SXTX) // 44e82638
FMOVS $(4.0), F0 // 0010221e
FMOVD $(4.0), F0 // 0010621e
FMOVS $(0.265625), F1 // 01302a1e
FMOVD $(0.1796875), F2 // 02f0681e
FMOVS $(0.96875), F3 // 03f02d1e
FMOVD $(28.0), F4 // 0490671e
VUADDW V9.B8, V12.H8, V14.H8 // 8e11292e
VUADDW V13.H4, V10.S4, V11.S4 // 4b116d2e
VUADDW V21.S2, V24.D2, V29.D2 // 1d13b52e
VUADDW2 V9.B16, V12.H8, V14.H8 // 8e11296e
VUADDW2 V13.H8, V20.S4, V30.S4 // 9e126d6e
VUADDW2 V21.S4, V24.D2, V29.D2 // 1d13b56e
FCCMPS LT, F1, F2, $1 // 41b4211e
FMADDS F1, F3, F2, F4 // 440c011f
FMADDD F4, F5, F4, F4 // 8414441f
FMSUBS F13, F21, F13, F19 // b3d50d1f
FMSUBD F11, F7, F15, F31 // ff9d4b1f
FNMADDS F1, F3, F2, F4 // 440c211f
FNMADDD F1, F3, F2, F4 // 440c611f
FNMSUBS F1, F3, F2, F4 // 448c211f
FNMSUBD F1, F3, F2, F4 // 448c611f
FADDS F2, F3, F4 // 6428221e
FADDD F1, F2 // 4228611e
VDUP V19.S[0], V17.S4 // 7106044e
// move a large constant to a Vd.
VMOVS $0x80402010, V11 // VMOVS $2151686160, V11
VMOVD $0x8040201008040201, V20 // VMOVD $-9205322385119247871, V20
VMOVQ $0x7040201008040201, $0x8040201008040201, V10 // VMOVQ $8088500183983456769, $-9205322385119247871, V10
VMOVQ $0x8040201008040202, $0x7040201008040201, V20 // VMOVQ $-9205322385119247870, $8088500183983456769, V20
FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc
FMOVS (R2)(R6<<2), F4 // 447866bc
FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc
FMOVD (R2)(R6<<3), F4 // 447866fc
FMOVS F4, (R2)(R6) // FMOVS F4, (R2)(R6*1) // 446826bc
FMOVS F4, (R2)(R6<<2) // 447826bc
FMOVD F4, (R2)(R6) // FMOVD F4, (R2)(R6*1) // 446826fc
FMOVD F4, (R2)(R6<<3) // 447826fc
// special
PRFM (R2), PLDL1KEEP // 400080f9
PRFM 16(R2), PLDL1KEEP // 400880f9
PRFM 48(R6), PSTL2STRM // d31880f9
PRFM 8(R12), PLIL3STRM // 8d0580f9
PRFM (R8), $25 // 190180f9
PRFM 8(R9), $30 // 3e0580f9
NOOP // 1f2003d5
HINT $0 // 1f2003d5
DMB $1
SVC
CMPW $40960, R0 // 1f284071
CMPW $27745, R2 // 3b8c8d525f001b6b
CMNW $0x3fffffc0, R2 // CMNW $1073741760, R2 // fb5f1a325f001b2b
CMPW $0xffff0, R1 // CMPW $1048560, R1 // fb3f1c323f001b6b
CMP $0xffffffffffa0, R3 // CMP $281474976710560, R3 // fb0b80921b00e0f27f001beb
CMP $0xf4240, R1 // CMP $1000000, R1 // 1b4888d2fb01a0f23f001beb
ADD $0x186a0, R2, R5 // ADD $100000, R2, R5 // 45801a91a5604091
SUB $0xe7791f700, R3, R1 // SUB $62135596800, R3, R1 // 1be09ed23bf2aef2db01c0f261001bcb
CMP $3343198598084851058, R3 // 5bae8ed2db8daef23badcdf2bbcce5f27f001beb
ADD $0x3fffffffc000, R5 // ADD $70368744161280, R5 // fb7f72b2a5001b8b
// LTYPE1 imsr ',' spreg ','
// {
// outcode($1, &$2, $4, &nullgen);
// }
// LTYPE1 imsr ',' reg
// {
// outcode($1, &$2, NREG, &$4);
// }
ADDW $1, R2
ADDW R1, R2
ADD $1, R2
ADD R1, R2
ADD R1>>11, R2
ADD R1<<22, R2
ADD R1->33, R2
AND R1@>33, R2
// encryption
SHA256H V9.S4, V3, V2 // 6240095e
SHA256H2 V9.S4, V4, V3 // 8350095e
SHA256SU0 V8.S4, V7.S4 // 0729285e
SHA256SU1 V6.S4, V5.S4, V7.S4 // a760065e
SHA1SU0 V11.S4, V8.S4, V6.S4 // 06310b5e
SHA1SU1 V5.S4, V1.S4 // a118285e
SHA1C V1.S4, V2, V3 // 4300015e
SHA1H V5, V4 // a408285e
SHA1M V8.S4, V7, V6 // e620085e
SHA1P V11.S4, V10, V9 // 49110b5e
SHA512H V2.D2, V1, V0 // 208062ce
SHA512H2 V4.D2, V3, V2 // 628464ce
SHA512SU0 V9.D2, V8.D2 // 2881c0ce
SHA512SU1 V7.D2, V6.D2, V5.D2 // c58867ce
VRAX1 V26.D2, V29.D2, V30.D2 // be8f7ace
VXAR $63, V27.D2, V21.D2, V26.D2 // bafe9bce
VPMULL V2.D1, V1.D1, V3.Q1 // 23e0e20e
VPMULL2 V2.D2, V1.D2, V4.Q1 // 24e0e24e
VPMULL V2.B8, V1.B8, V3.H8 // 23e0220e
VPMULL2 V2.B16, V1.B16, V4.H8 // 24e0224e
VEOR3 V2.B16, V7.B16, V12.B16, V25.B16 // 990907ce
VBCAX V1.B16, V2.B16, V26.B16, V31.B16 // 5f0722ce
VREV32 V5.B16, V5.B16 // a508206e
VREV64 V2.S2, V3.S2 // 4308a00e
VREV64 V2.S4, V3.S4 // 4308a04e
// logical ops
//
// make sure constants get encoded into an instruction when it could
AND R1@>33, R2
AND $(1<<63), R1 // AND $-9223372036854775808, R1 // 21004192
AND $(1<<63-1), R1 // AND $9223372036854775807, R1 // 21f84092
ORR $(1<<63), R1 // ORR $-9223372036854775808, R1 // 210041b2
ORR $(1<<63-1), R1 // ORR $9223372036854775807, R1 // 21f840b2
EOR $(1<<63), R1 // EOR $-9223372036854775808, R1 // 210041d2
EOR $(1<<63-1), R1 // EOR $9223372036854775807, R1 // 21f840d2
ANDW $0x3ff00000, R2 // ANDW $1072693248, R2 // 42240c12
BICW $0x3ff00000, R2 // BICW $1072693248, R2 // 42540212
ORRW $0x3ff00000, R2 // ORRW $1072693248, R2 // 42240c32
ORNW $0x3ff00000, R2 // ORNW $1072693248, R2 // 42540232
EORW $0x3ff00000, R2 // EORW $1072693248, R2 // 42240c52
EONW $0x3ff00000, R2 // EONW $1072693248, R2 // 42540252
AND $0x22220000, R3, R4 // AND $572653568, R3, R4 // 5b44a4d264001b8a
ORR $0x22220000, R3, R4 // ORR $572653568, R3, R4 // 5b44a4d264001baa
EOR $0x22220000, R3, R4 // EOR $572653568, R3, R4 // 5b44a4d264001bca
@@ -287,7 +284,6 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
EON $0x22220000, R3, R4 // EON $572653568, R3, R4 // 5b44a4d264003bca
ANDS $0x22220000, R3, R4 // ANDS $572653568, R3, R4 // 5b44a4d264001bea
BICS $0x22220000, R3, R4 // BICS $572653568, R3, R4 // 5b44a4d264003bea
EOR $0xe03fffffffffffff, R20, R22 // EOR $-2287828610704211969, R20, R22 // 96e243d2
TSTW $0x600000006, R1 // TSTW $25769803782, R1 // 3f041f72
TST $0x4900000049, R0 // TST $313532612681, R0 // 3b0980d23b09c0f21f001bea
@@ -316,19 +312,22 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
EONW $0x6006000060060, R5 // EONW $1689262177517664, R5 // 1b0c8052db00a072a5003b4a
ORNW $0x6006000060060, R5 // ORNW $1689262177517664, R5 // 1b0c8052db00a072a5003b2a
BICSW $0x6006000060060, R5 // BICSW $1689262177517664, R5 // 1b0c8052db00a072a5003b6a
ADDW $0x60060, R2 // ADDW $393312, R2 // 4280011142804111
CMPW $0x60060, R2 // CMPW $393312, R2 // 1b0c8052db00a0725f001b6b
// TODO: this could have better encoding
ANDW $-1, R10 // 1b0080124a011b0a
AND $8, R0, RSP // 1f007d92
ORR $8, R0, RSP // 1f007db2
EOR $8, R0, RSP // 1f007dd2
BIC $8, R0, RSP // 1ff87c92
ORN $8, R0, RSP // 1ff87cb2
EON $8, R0, RSP // 1ff87cd2
TST $15, R2 // 5f0c40f2
TST R1, R2 // 5f0001ea
TST R1->11, R2 // 5f2c81ea
TST R1>>22, R2 // 5f5841ea
TST R1<<33, R2 // 5f8401ea
TST $0x22220000, R3 // TST $572653568, R3 // 5b44a4d27f001bea
// move an immediate to a Rn.
MOVD $0x3fffffffc000, R0 // MOVD $70368744161280, R0 // e07f72b2
MOVW $1000000, R4 // 04488852e401a072
MOVW $0xaaaa0000, R1 // MOVW $2863267840, R1 // 4155b552
@@ -348,46 +347,37 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVD $-1, R1 // 01008092
MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2
MOVD $0xffffffffffffaaaa, R1 // MOVD $-21846, R1 // a1aa8a92
MOVW $1, ZR
MOVW $1, R1
MOVD $1, ZR
MOVD $1, R1
MOVK $1, R1
// move a large constant to a Vd.
VMOVS $0x80402010, V11 // VMOVS $2151686160, V11
VMOVD $0x8040201008040201, V20 // VMOVD $-9205322385119247871, V20
VMOVQ $0x7040201008040201, $0x8040201008040201, V10 // VMOVQ $8088500183983456769, $-9205322385119247871, V10
VMOVQ $0x8040201008040202, $0x7040201008040201, V20 // VMOVQ $-9205322385119247870, $8088500183983456769, V20
// mov(to/from sp)
MOVD $0x1002(RSP), R1 // MOVD $4098(RSP), R1 // fb074091610b0091
MOVD $0x1708(RSP), RSP // MOVD $5896(RSP), RSP // fb0740917f231c91
MOVD $0x2001(R7), R1 // MOVD $8193(R7), R1 // fb08409161070091
MOVD $0xffffff(R7), R1 // MOVD $16777215(R7), R1 // fbfc7f9161ff3f91
MOVD $-0x1(R7), R1 // MOVD $-1(R7), R1 // e10400d1
MOVD $-0x30(R7), R1 // MOVD $-48(R7), R1 // e1c000d1
MOVD $-0x708(R7), R1 // MOVD $-1800(R7), R1 // e1201cd1
MOVD $-0x2000(RSP), R1 // MOVD $-8192(RSP), R1 // e10b40d1
MOVD $-0x10000(RSP), RSP // MOVD $-65536(RSP), RSP // ff4340d1
//
// CLS
//
// LTYPE2 imsr ',' reg
// {
// outcode($1, &$2, NREG, &$4);
// }
CLSW R1, R2
CLS R1, R2
//
// MOV
//
// LTYPE3 addr ',' addr
// {
// outcode($1, &$2, NREG, &$4);
// }
MOVW R1, R2
MOVW ZR, R1
MOVW R1, ZR
MOVW $1, ZR
MOVW $1, R1
MOVW ZR, (R1)
MOVD R1, R2
MOVD ZR, R1
MOVD $1, ZR
MOVD $1, R1
MOVD ZR, (R1)
// store and load
//
// LD1/ST1
VLD1 (R8), [V1.B16, V2.B16] // 01a1404c
VLD1.P (R3), [V31.H8, V0.H8] // 7fa4df4c
VLD1.P (R8)(R20), [V21.B16, V22.B16] // VLD1.P (R8)(R20*1), [V21.B16,V22.B16] // 15a1d44c
@@ -445,34 +435,21 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VST4 [V22.D2, V23.D2, V24.D2, V25.D2], (R3) // 760c004c
VST4.P [V14.D2, V15.D2, V16.D2, V17.D2], 64(R15) // ee0d9f4c
VST4.P [V24.B8, V25.B8, V26.B8, V27.B8], (R3)(R23) // VST4.P [V24.B8, V25.B8, V26.B8, V27.B8], (R3)(R23*1) // 7800970c
FMOVS F20, (R0) // 140000bd
// pre/post-indexed
FMOVS.P F20, 4(R0) // 144400bc
FMOVS.W F20, 4(R0) // 144c00bc
FMOVS (R0), F20 // 140040bd
FMOVD.P F20, 8(R1) // 348400fc
FMOVQ.P F13, 11(R10) // 4db5803c
FMOVQ.W F15, 11(R20) // 8fbe803c
FMOVS.P 8(R0), F20 // 148440bc
FMOVS.W 8(R0), F20 // 148c40bc
FMOVD F20, (R2) // 540000fd
FMOVD.P F20, 8(R1) // 348400fc
FMOVD.W 8(R1), F20 // 348c40fc
PRFM (R2), PLDL1KEEP // 400080f9
PRFM 16(R2), PLDL1KEEP // 400880f9
PRFM 48(R6), PSTL2STRM // d31880f9
PRFM 8(R12), PLIL3STRM // 8d0580f9
PRFM (R8), $25 // 190180f9
PRFM 8(R9), $30 // 3e0580f9
FMOVQ.P 11(R10), F13 // 4db5c03c
FMOVQ.W 11(R20), F15 // 8fbec03c
// small offset fits into instructions
MOVB 1(R1), R2 // 22048039
MOVH 1(R1), R2 // 22108078
MOVH 2(R1), R2 // 22048079
MOVW 1(R1), R2 // 221080b8
MOVW 4(R1), R2 // 220480b9
MOVD 1(R1), R2 // 221040f8
MOVD 8(R1), R2 // 220440f9
FMOVS 1(R1), F2 // 221040bc
FMOVS 4(R1), F2 // 220440bd
FMOVD 1(R1), F2 // 221040fc
FMOVD 8(R1), F2 // 220440fd
// small offset fits into instructions
MOVB R1, 1(R2) // 41040039
MOVH R1, 1(R2) // 41100078
MOVH R1, 2(R2) // 41040079
@@ -480,18 +457,37 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVW R1, 4(R2) // 410400b9
MOVD R1, 1(R2) // 411000f8
MOVD R1, 8(R2) // 410400f9
MOVD ZR, (R1)
MOVW ZR, (R1)
FMOVS F1, 1(R2) // 411000bc
FMOVS F1, 4(R2) // 410400bd
FMOVS F20, (R0) // 140000bd
FMOVD F1, 1(R2) // 411000fc
FMOVD F1, 8(R2) // 410400fd
FMOVD F20, (R2) // 540000fd
FMOVQ F0, 32(R5) // a008803d
FMOVQ F10, 65520(R10) // 4afdbf3d
FMOVQ F11, 64(RSP) // eb13803d
FMOVQ F11, 8(R20) // 8b82803c
FMOVQ F11, 4(R20) // 8b42803c
// large aligned offset, use two instructions
MOVB 0x1001(R1), R2 // MOVB 4097(R1), R2 // 3b04409162078039
MOVH 0x2002(R1), R2 // MOVH 8194(R1), R2 // 3b08409162078079
MOVW 0x4004(R1), R2 // MOVW 16388(R1), R2 // 3b104091620780b9
MOVD 0x8008(R1), R2 // MOVD 32776(R1), R2 // 3b204091620740f9
FMOVS 0x4004(R1), F2 // FMOVS 16388(R1), F2 // 3b104091620740bd
FMOVD 0x8008(R1), F2 // FMOVD 32776(R1), F2 // 3b204091620740fd
MOVB 1(R1), R2 // 22048039
MOVH 1(R1), R2 // 22108078
MOVH 2(R1), R2 // 22048079
MOVW 1(R1), R2 // 221080b8
MOVW 4(R1), R2 // 220480b9
MOVD 1(R1), R2 // 221040f8
MOVD 8(R1), R2 // 220440f9
FMOVS (R0), F20 // 140040bd
FMOVS 1(R1), F2 // 221040bc
FMOVS 4(R1), F2 // 220440bd
FMOVD 1(R1), F2 // 221040fc
FMOVD 8(R1), F2 // 220440fd
FMOVQ 32(R5), F2 // a208c03d
FMOVQ 65520(R10), F10 // 4afdff3d
FMOVQ 64(RSP), F11 // eb13c03d
// large aligned offset, use two instructions(add+ldr/store).
MOVB R1, 0x1001(R2) // MOVB R1, 4097(R2) // 5b04409161070039
MOVH R1, 0x2002(R2) // MOVH R1, 8194(R2) // 5b08409161070079
MOVW R1, 0x4004(R2) // MOVW R1, 16388(R2) // 5b104091610700b9
@@ -499,15 +495,16 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS F1, 0x4004(R2) // FMOVS F1, 16388(R2) // 5b104091610700bd
FMOVD F1, 0x8008(R2) // FMOVD F1, 32776(R2) // 5b204091610700fd
// very large or unaligned offset uses constant pool
// the encoding cannot be checked as the address of the constant pool is unknown.
// here we only test that they can be assembled.
MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2
MOVH 0x44332211(R1), R2 // MOVH 1144201745(R1), R2
MOVW 0x44332211(R1), R2 // MOVW 1144201745(R1), R2
MOVD 0x44332211(R1), R2 // MOVD 1144201745(R1), R2
FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2
FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2
MOVB 0x1001(R1), R2 // MOVB 4097(R1), R2 // 3b04409162078039
MOVH 0x2002(R1), R2 // MOVH 8194(R1), R2 // 3b08409162078079
MOVW 0x4004(R1), R2 // MOVW 16388(R1), R2 // 3b104091620780b9
MOVD 0x8008(R1), R2 // MOVD 32776(R1), R2 // 3b204091620740f9
FMOVS 0x4004(R1), F2 // FMOVS 16388(R1), F2 // 3b104091620740bd
FMOVD 0x8008(R1), F2 // FMOVD 32776(R1), F2 // 3b204091620740fd
// very large or unaligned offset uses constant pool.
// the encoding cannot be checked as the address of the constant pool is unknown.
// here we only test that they can be assembled.
MOVB R1, 0x44332211(R2) // MOVB R1, 1144201745(R2)
MOVH R1, 0x44332211(R2) // MOVH R1, 1144201745(R2)
MOVW R1, 0x44332211(R2) // MOVW R1, 1144201745(R2)
@@ -515,14 +512,59 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS F1, 0x44332211(R2) // FMOVS F1, 1144201745(R2)
FMOVD F1, 0x44332211(R2) // FMOVD F1, 1144201745(R2)
//
// MOVK
//
// LMOVK imm ',' reg
// {
// outcode($1, &$2, NREG, &$4);
// }
MOVK $1, R1
MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2
MOVH 0x44332211(R1), R2 // MOVH 1144201745(R1), R2
MOVW 0x44332211(R1), R2 // MOVW 1144201745(R1), R2
MOVD 0x44332211(R1), R2 // MOVD 1144201745(R1), R2
FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2
FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2
// shifted or extended register offset.
MOVD (R2)(R6.SXTW), R4 // 44c866f8
MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8
MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8
MOVWU (R19)(R20<<2), R20 // 747a74b8
MOVD (R2)(R6<<3), R4 // 447866f8
MOVD (R3)(R7.SXTX<<3), R8 // 68f867f8
MOVWU (R5)(R4.UXTW), R10 // aa4864b8
MOVBU (R3)(R9.UXTW), R8 // 68486938
MOVBU (R5)(R8), R10 // MOVBU (R5)(R8*1), R10 // aa686838
MOVHU (R2)(R7.SXTW<<1), R11 // 4bd86778
MOVHU (R1)(R2<<1), R5 // 25786278
MOVB (R9)(R3.UXTW), R6 // 2649a338
MOVB (R10)(R6), R15 // MOVB (R10)(R6*1), R15 // 4f69a638
MOVB (R29)(R30<<0), R14 // ae7bbe38
MOVB (R29)(R30), R14 // MOVB (R29)(R30*1), R14 // ae6bbe38
MOVH (R5)(R7.SXTX<<1), R19 // b3f8a778
MOVH (R8)(R4<<1), R10 // 0a79a478
MOVW (R9)(R8.SXTW<<2), R19 // 33d9a8b8
MOVW (R1)(R4.SXTX), R11 // 2be8a4b8
MOVW (R1)(R4.SXTX), ZR // 3fe8a4b8
MOVW (R2)(R5), R12 // MOVW (R2)(R5*1), R12 // 4c68a5b8
FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc
FMOVS (R2)(R6<<2), F4 // 447866bc
FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc
FMOVD (R2)(R6<<3), F4 // 447866fc
MOVD R5, (R2)(R6<<3) // 457826f8
MOVD R9, (R6)(R7.SXTX<<3) // c9f827f8
MOVD ZR, (R6)(R7.SXTX<<3) // dff827f8
MOVW R8, (R2)(R3.UXTW<<2) // 485823b8
MOVW R7, (R3)(R4.SXTW) // 67c824b8
MOVB R4, (R2)(R6.SXTX) // 44e82638
MOVB R8, (R3)(R9.UXTW) // 68482938
MOVB R10, (R5)(R8) // MOVB R10, (R5)(R8*1) // aa682838
MOVH R11, (R2)(R7.SXTW<<1) // 4bd82778
MOVH R5, (R1)(R2<<1) // 25782278
MOVH R7, (R2)(R5.SXTX<<1) // 47f82578
MOVH R8, (R3)(R6.UXTW) // 68482678
MOVB R4, (R2)(R6.SXTX) // 44e82638
FMOVS F4, (R2)(R6) // FMOVS F4, (R2)(R6*1) // 446826bc
FMOVS F4, (R2)(R6<<2) // 447826bc
FMOVD F4, (R2)(R6) // FMOVD F4, (R2)(R6*1) // 446826fc
FMOVD F4, (R2)(R6<<3) // 447826fc
// vmov
VMOV V8.S[1], R1 // 013d0c0e
VMOV V0.D[0], R11 // 0b3c084e
VMOV V0.D[1], R11 // 0b3c184e
@@ -537,205 +579,28 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VMOV V9.H[0], V12.H[1] // 2c05066e
VMOV V8.B[0], V12.B[1] // 0c05036e
VMOV V8.B[7], V4.B[8] // 043d116e
VREV32 V5.B16, V5.B16 // a508206e
VREV64 V2.S2, V3.S2 // 4308a00e
VREV64 V2.S4, V3.S4 // 4308a04e
VDUP V19.S[0], V17.S4 // 7106044e
//
// B/BL
//
// LTYPE4 comma rel
// {
// outcode($1, &nullgen, NREG, &$3);
// }
BL 1(PC) // CALL 1(PC)
// LTYPE4 comma nireg
// {
// outcode($1, &nullgen, NREG, &$3);
// }
BL (R2) // CALL (R2)
BL foo(SB) // CALL foo(SB)
BL bar<>(SB) // CALL bar<>(SB)
//
// BEQ
//
// LTYPE5 comma rel
// {
// outcode($1, &nullgen, NREG, &$3);
// }
BEQ 1(PC)
//
// SVC
//
// LTYPE6
// {
// outcode($1, &nullgen, NREG, &nullgen);
// }
SVC
//
// CMP
//
// LTYPE7 imsr ',' spreg comma
// {
// outcode($1, &$2, $4, &nullgen);
// }
CMP $3, R2
CMP R1, R2
CMP R1->11, R2
CMP R1>>22, R2
CMP R1<<33, R2
CMP R22.SXTX, RSP // ffe336eb
CMP $0x22220000, RSP // CMP $572653568, RSP // 5b44a4d2ff633beb
CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff633b6b
// TST
TST $15, R2 // 5f0c40f2
TST R1, R2 // 5f0001ea
TST R1->11, R2 // 5f2c81ea
TST R1>>22, R2 // 5f5841ea
TST R1<<33, R2 // 5f8401ea
TST $0x22220000, R3 // TST $572653568, R3 // 5b44a4d27f001bea
//
// CBZ
//
// LTYPE8 reg ',' rel
// {
// outcode($1, &$2, NREG, &$4);
// }
again:
CBZ R1, again // CBZ R1
//
// CSET
//
// LTYPER cond ',' reg
// {
// outcode($1, &$2, NREG, &$4);
// }
// conditional operations
CSET GT, R1 // e1d79f9a
CSETW HI, R2 // e2979f1a
//
// CSEL/CSINC/CSNEG/CSINV
//
// LTYPES cond ',' reg ',' reg ',' reg
// {
// outgcode($1, &$2, $6.reg, &$4, &$8);
// }
CSEL LT, R1, R2, ZR // 3fb0829a
CSELW LT, R2, R3, R4 // 44b0831a
CSINC GT, R1, ZR, R3 // 23c49f9a
CSNEG MI, R1, R2, R3 // 234482da
CSINV CS, R1, R2, R3 // CSINV HS, R1, R2, R3 // 232082da
CSINVW MI, R2, ZR, R2 // 42409f5a
// LTYPES cond ',' reg ',' reg
// {
// outcode($1, &$2, $4.reg, &$6);
// }
CINC EQ, R4, R9 // 8914849a
CINCW PL, R2, ZR // 5f44821a
CINV PL, R11, R22 // 76418bda
CINVW LS, R7, R13 // ed80875a
CNEG LS, R13, R7 // a7858dda
CNEGW EQ, R8, R13 // 0d15885a
//
// CCMN
//
// LTYPEU cond ',' imsr ',' reg ',' imm comma
// {
// outgcode($1, &$2, $6.reg, &$4, &$8);
// }
CCMN MI, ZR, R1, $4 // e44341ba
//
// FADDD
//
// LTYPEK frcon ',' freg
// {
// outcode($1, &$2, NREG, &$4);
// }
// FADDD $0.5, F1 // FADDD $(0.5), F1
FADDD F1, F2
// LTYPEK frcon ',' freg ',' freg
// {
// outcode($1, &$2, $4.reg, &$6);
// }
// FADDD $0.7, F1, F2 // FADDD $(0.69999999999999996), F1, F2
FADDD F1, F2, F3
//
// FCMP
//
// LTYPEL frcon ',' freg comma
// {
// outcode($1, &$2, $4.reg, &nullgen);
// }
// FCMP $0.2, F1
// FCMP F1, F2
//
// FCCMP
//
// LTYPEF cond ',' freg ',' freg ',' imm comma
// {
// outgcode($1, &$2, $6.reg, &$4, &$8);
// }
FCCMPS LT, F1, F2, $1 // 41b4211e
//
// FMULA
//
// LTYPE9 freg ',' freg ',' freg ',' freg comma
// {
// outgcode($1, &$2, $4.reg, &$6, &$8);
// }
// FMULA F1, F2, F3, F4
//
// FCSEL
//
// LFCSEL cond ',' freg ',' freg ',' freg
// {
// outgcode($1, &$2, $6.reg, &$4, &$8);
// }
//
// MADD Rn,Rm,Ra,Rd
//
// LTYPEM reg ',' reg ',' sreg ',' reg
// {
// outgcode($1, &$2, $6, &$4, &$8);
// }
// MADD R1, R2, R3, R4
FMADDS F1, F3, F2, F4 // 440c011f
FMADDD F4, F5, F4, F4 // 8414441f
FMSUBS F13, F21, F13, F19 // b3d50d1f
FMSUBD F11, F7, F15, F31 // ff9d4b1f
FNMADDS F1, F3, F2, F4 // 440c211f
FNMADDD F1, F3, F2, F4 // 440c611f
FNMSUBS F1, F3, F2, F4 // 448c211f
FNMSUBD F1, F3, F2, F4 // 448c611f
// DMB, HINT
//
// LDMB imm
// {
// outcode($1, &$2, NREG, &nullgen);
// }
DMB $1
//
// STXR
//
// LSTXR reg ',' addr ',' reg
// {
// outcode($1, &$2, &$4, &$6);
// }
// atomic ops
LDARB (R25), R2 // 22ffdf08
LDARH (R5), R7 // a7fcdf48
LDAXPW (R10), (R20, R16) // 54c17f88
@@ -912,21 +777,36 @@ again:
LDORLH R5, (RSP), R7 // e7336578
LDORLB R5, (R6), R7 // c7306538
LDORLB R5, (RSP), R7 // e7336538
CASD R1, (R2), ZR // 5f7ca1c8
CASW R1, (RSP), ZR // ff7fa188
CASB ZR, (R5), R3 // a37cbf08
CASH R3, (RSP), ZR // ff7fa348
CASW R5, (R7), R6 // e67ca588
CASLD ZR, (RSP), R8 // e8ffbfc8
CASLW R9, (R10), ZR // 5ffda988
CASAD R7, (R11), R15 // 6f7de7c8
CASAW R10, (RSP), R19 // f37fea88
CASALD R5, (R6), R7 // c7fce5c8
CASALD R5, (RSP), R7 // e7ffe5c8
CASALW R5, (R6), R7 // c7fce588
CASALW R5, (RSP), R7 // e7ffe588
CASALH ZR, (R5), R8 // a8fcff48
CASALB R8, (R9), ZR // 3ffde808
CASPD (R30, ZR), (RSP), (R8, R9) // e87f3e48
CASPW (R6, R7), (R8), (R4, R5) // 047d2608
CASPD (R2, R3), (R2), (R8, R9) // 487c2248
// RET
//
// LTYPEA comma
// {
// outcode($1, &nullgen, NREG, &nullgen);
// }
BEQ 2(PC)
RET
RET foo(SB)
// More B/BL cases, and canonical names JMP, CALL.
BEQ 2(PC)
B foo(SB) // JMP foo(SB)
// B/BL/B.cond cases, and canonical names JMP, CALL.
BL 1(PC) // CALL 1(PC)
BL (R2) // CALL (R2)
BL foo(SB) // CALL foo(SB)
BL bar<>(SB) // CALL bar<>(SB)
B foo(SB) // JMP foo(SB)
BEQ 1(PC)
BEQ 2(PC)
TBZ $1, R1, 2(PC)
TBNZ $2, R2, 2(PC)
@@ -1101,8 +981,6 @@ again:
FSTPS (F3, F4), 1024(RSP) // fb0310916313002d
FSTPS (F3, F4), x(SB)
FSTPS (F3, F4), x+8(SB)
NOOP // 1f2003d5
HINT $0 // 1f2003d5
// System Register
MSR $1, SPSel // bf4100d5
@@ -1664,11 +1542,4 @@ again:
MSR R13, ZCR_EL1 // 0d1218d5
MRS ZCR_EL1, R23 // 171238d5
MSR R17, ZCR_EL1 // 111218d5
// END
//
// LTYPEE comma
// {
// outcode($1, &nullgen, NREG, &nullgen);
// }
END


@@ -87,13 +87,13 @@ TEXT errors(SB),$0
VLD1.P 32(R1), [V8.S4, V9.S4, V10.S4] // ERROR "invalid post-increment offset"
VLD1.P 48(R1), [V7.S4, V8.S4, V9.S4, V10.S4] // ERROR "invalid post-increment offset"
VPMULL V1.D1, V2.H4, V3.Q1 // ERROR "invalid arrangement"
VPMULL V1.H4, V2.H4, V3.Q1 // ERROR "invalid arrangement"
VPMULL V1.D2, V2.D2, V3.Q1 // ERROR "invalid arrangement"
VPMULL V1.B16, V2.B16, V3.H8 // ERROR "invalid arrangement"
VPMULL V1.H4, V2.H4, V3.Q1 // ERROR "operand mismatch"
VPMULL V1.D2, V2.D2, V3.Q1 // ERROR "operand mismatch"
VPMULL V1.B16, V2.B16, V3.H8 // ERROR "operand mismatch"
VPMULL2 V1.D2, V2.H4, V3.Q1 // ERROR "invalid arrangement"
VPMULL2 V1.H4, V2.H4, V3.Q1 // ERROR "invalid arrangement"
VPMULL2 V1.D1, V2.D1, V3.Q1 // ERROR "invalid arrangement"
VPMULL2 V1.B8, V2.B8, V3.H8 // ERROR "invalid arrangement"
VPMULL2 V1.H4, V2.H4, V3.Q1 // ERROR "operand mismatch"
VPMULL2 V1.D1, V2.D1, V3.Q1 // ERROR "operand mismatch"
VPMULL2 V1.B8, V2.B8, V3.H8 // ERROR "operand mismatch"
VEXT $8, V1.B16, V2.B8, V2.B16 // ERROR "invalid arrangement"
VEXT $8, V1.H8, V2.H8, V2.H8 // ERROR "invalid arrangement"
VRBIT V1.B16, V2.B8 // ERROR "invalid arrangement"
@@ -353,4 +353,12 @@ TEXT errors(SB),$0
VUSHLL2 $32, V30.S4, V2.D2 // ERROR "shift amount out of range"
VBIF V0.B8, V1.B8, V2.B16 // ERROR "operand mismatch"
VBIF V0.D2, V1.D2, V2.D2 // ERROR "invalid arrangement"
VUADDW V9.B8, V12.H8, V14.B8 // ERROR "invalid arrangement"
VUADDW2 V9.B8, V12.S4, V14.S4 // ERROR "operand mismatch"
VSLI $64, V7.D2, V8.D2 // ERROR "shift out of range"
VUSRA $0, V7.D2, V8.D2 // ERROR "shift out of range"
CASPD (R3, R4), (R2), (R8, R9) // ERROR "source register pair must start from even register"
CASPD (R2, R3), (R2), (R9, R10) // ERROR "destination register pair must start from even register"
CASPD (R2, R4), (R2), (R8, R9) // ERROR "source register pair must be contiguous"
CASPD (R2, R3), (R2), (R8, R10) // ERROR "destination register pair must be contiguous"
RET


@@ -297,6 +297,13 @@ start:
MOVW X5, (X6) // 23205300
MOVW X5, 4(X6) // 23225300
MOVB X5, X6 // 1393820313538343
MOVH X5, X6 // 1393020313530343
MOVW X5, X6 // 1b830200
MOVBU X5, X6 // 13f3f20f
MOVHU X5, X6 // 1393020313530303
MOVWU X5, X6 // 1393020213530302
MOVF 4(X5), F0 // 07a04200
MOVF F0, 4(X5) // 27a20200
MOVF F0, F1 // d3000020
@@ -318,7 +325,7 @@ start:
// These jumps can get printed as jumps to 2 because they go to the
// second instruction in the function (the first instruction is an
// invisible stack pointer adjustment).
JMP start // JMP 2 // 6ff01fc5
JMP start // JMP 2 // 6ff09fc2
JMP (X5) // 67800200
JMP 4(X5) // 67804200
@@ -331,16 +338,16 @@ start:
JMP asmtest(SB) // 970f0000
// Branch pseudo-instructions
BEQZ X5, start // BEQZ X5, 2 // e38a02c2
BGEZ X5, start // BGEZ X5, 2 // e3d802c2
BGT X5, X6, start // BGT X5, X6, 2 // e3c662c2
BGTU X5, X6, start // BGTU X5, X6, 2 // e3e462c2
BGTZ X5, start // BGTZ X5, 2 // e34250c2
BLE X5, X6, start // BLE X5, X6, 2 // e3d062c2
BLEU X5, X6, start // BLEU X5, X6, 2 // e3fe62c0
BLEZ X5, start // BLEZ X5, 2 // e35c50c0
BLTZ X5, start // BLTZ X5, 2 // e3ca02c0
BNEZ X5, start // BNEZ X5, 2 // e39802c0
BEQZ X5, start // BEQZ X5, 2 // e38602c0
BGEZ X5, start // BGEZ X5, 2 // e3d402c0
BGT X5, X6, start // BGT X5, X6, 2 // e3c262c0
BGTU X5, X6, start // BGTU X5, X6, 2 // e3e062c0
BGTZ X5, start // BGTZ X5, 2 // e34e50be
BLE X5, X6, start // BLE X5, X6, 2 // e3dc62be
BLEU X5, X6, start // BLEU X5, X6, 2 // e3fa62be
BLEZ X5, start // BLEZ X5, 2 // e35850be
BLTZ X5, start // BLTZ X5, 2 // e3c602be
BNEZ X5, start // BNEZ X5, 2 // e39402be
// Set pseudo-instructions
SEQZ X15, X15 // 93b71700


@@ -22,21 +22,6 @@ func usage() {
var wflag = flag.Bool("w", false, "write build ID")
// taken from cmd/go/internal/work/buildid.go
func hashToString(h [32]byte) string {
const b64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
const chunks = 5
var dst [chunks * 4]byte
for i := 0; i < chunks; i++ {
v := uint32(h[3*i])<<16 | uint32(h[3*i+1])<<8 | uint32(h[3*i+2])
dst[4*i+0] = b64[(v>>18)&0x3F]
dst[4*i+1] = b64[(v>>12)&0x3F]
dst[4*i+2] = b64[(v>>6)&0x3F]
dst[4*i+3] = b64[v&0x3F]
}
return string(dst[:])
}
func main() {
log.SetPrefix("buildid: ")
log.SetFlags(0)
@@ -63,12 +48,12 @@ func main() {
log.Fatal(err)
}
matches, hash, err := buildid.FindAndHash(f, id, 0)
f.Close()
if err != nil {
log.Fatal(err)
}
f.Close()
newID := id[:strings.LastIndex(id, "/")] + "/" + hashToString(hash)
newID := id[:strings.LastIndex(id, "/")] + "/" + buildid.HashToString(hash)
if len(newID) != len(id) {
log.Fatalf("%s: build ID length mismatch %q vs %q", file, id, newID)
}


@@ -13,7 +13,6 @@ import (
"go/scanner"
"go/token"
"os"
"path/filepath"
"strings"
)
@@ -44,14 +43,7 @@ func sourceLine(n ast.Node) int {
// attached to the import "C" comment, a list of references to C.xxx,
// a list of exported functions, and the actual AST, to be rewritten and
// printed.
func (f *File) ParseGo(name string, src []byte) {
// Create absolute path for file, so that it will be used in error
// messages and recorded in debug line number information.
// This matches the rest of the toolchain. See golang.org/issue/5122.
if aname, err := filepath.Abs(name); err == nil {
name = aname
}
func (f *File) ParseGo(abspath string, src []byte) {
// Two different parses: once with comments, once without.
// The printer is not good enough at printing comments in the
// right place when we start editing the AST behind its back,
@@ -60,8 +52,8 @@ func (f *File) ParseGo(name string, src []byte) {
// and reprinting.
// In cgo mode, we ignore ast2 and just apply edits directly
// the text behind ast1. In godefs mode we modify and print ast2.
ast1 := parse(name, src, parser.ParseComments)
ast2 := parse(name, src, 0)
ast1 := parse(abspath, src, parser.ParseComments)
ast2 := parse(abspath, src, 0)
f.Package = ast1.Name.Name
f.Name = make(map[string]*Name)
@@ -88,7 +80,7 @@ func (f *File) ParseGo(name string, src []byte) {
cg = d.Doc
}
if cg != nil {
f.Preamble += fmt.Sprintf("#line %d %q\n", sourceLine(cg), name)
f.Preamble += fmt.Sprintf("#line %d %q\n", sourceLine(cg), abspath)
f.Preamble += commentText(cg) + "\n"
f.Preamble += "#line 1 \"cgo-generated-wrapper\"\n"
}


@@ -721,7 +721,7 @@ linkage to the desired libraries. The main function is provided by
_cgo_main.c:
int main() { return 0; }
void crosscall2(void(*fn)(void*, int, uintptr_t), void *a, int c, uintptr_t ctxt) { }
void crosscall2(void(*fn)(void*), void *a, int c, uintptr_t ctxt) { }
uintptr_t _cgo_wait_runtime_init_done(void) { return 0; }
void _cgo_release_context(uintptr_t ctxt) { }
char* _cgo_topofstack(void) { return (char*)0; }


@@ -16,7 +16,7 @@ import (
)
// godefs returns the output for -godefs mode.
func (p *Package) godefs(f *File, srcfile string) string {
func (p *Package) godefs(f *File) string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "// Code generated by cmd/cgo -godefs; DO NOT EDIT.\n")


@@ -243,6 +243,8 @@ var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used with gccgo")
var gccgoMangler func(string) string
var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code")
var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code")
var trimpath = flag.String("trimpath", "", "applies supplied rewrites or trims prefixes to recorded source file paths")
var goarch, goos string
func main() {
@@ -322,6 +324,13 @@ func main() {
input = filepath.Join(*srcDir, input)
}
// Create absolute path for file, so that it will be used in error
// messages and recorded in debug line number information.
// This matches the rest of the toolchain. See golang.org/issue/5122.
if aname, err := filepath.Abs(input); err == nil {
input = aname
}
b, err := ioutil.ReadFile(input)
if err != nil {
fatalf("%s", err)
@@ -330,6 +339,10 @@ func main() {
fatalf("%s", err)
}
// Apply trimpath to the file path. The path won't be read from after this point.
input, _ = objabi.ApplyRewrites(input, *trimpath)
goFiles[i] = input
f := new(File)
f.Edit = edit.NewBuffer(b)
f.ParseGo(input, b)
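For orientation, a hedged sketch of the rewrite semantics assumed here: the -trimpath value is a semicolon-separated list of rewrites, each either a bare prefix to trim or a "prefix=>replacement" pair, applied by objabi.ApplyRewrites. The paths below are hypothetical, and the meaning of the second result (whether a rewrite applied) is an assumption from context:

	// Sketch only: "prefix=>replacement" rewrites a matching prefix;
	// a bare "prefix" trims it. The discarded bool presumably reports
	// whether any rewrite matched (assumption).
	rewritten, _ := objabi.ApplyRewrites("/home/user/proj/main.go", "/home/user/proj=>proj")
	// rewritten == "proj/main.go"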
@@ -367,7 +380,7 @@ func main() {
p.PackagePath = f.Package
p.Record(f)
if *godefs {
os.Stdout.WriteString(p.godefs(f, input))
os.Stdout.WriteString(p.godefs(f))
} else {
p.writeOutput(f, input)
}


@@ -59,14 +59,14 @@ func (p *Package) writeDefs() {
// Write C main file for using gcc to resolve imports.
fmt.Fprintf(fm, "int main() { return 0; }\n")
if *importRuntimeCgo {
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*), void *a, int c, __SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done(void) { return 0; }\n")
fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__ ctxt) { }\n")
fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n")
} else {
// If we're not importing runtime/cgo, we *are* runtime/cgo,
// which provides these functions. We just need a prototype.
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt);\n")
fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*), void *a, int c, __SIZE_TYPE__ ctxt);\n")
fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done(void);\n")
fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__);\n")
}
@@ -852,7 +852,7 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wpragmas\"\n")
fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Waddress-of-packed-member\"\n")
fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *, int, __SIZE_TYPE__), void *, int, __SIZE_TYPE__);\n")
fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *), void *, int, __SIZE_TYPE__);\n")
fmt.Fprintf(fgcc, "extern __SIZE_TYPE__ _cgo_wait_runtime_init_done(void);\n")
fmt.Fprintf(fgcc, "extern void _cgo_release_context(__SIZE_TYPE__);\n\n")
fmt.Fprintf(fgcc, "extern char* _cgo_topofstack(void);")
@@ -862,59 +862,48 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
for _, exp := range p.ExpFunc {
fn := exp.Func
// Construct a gcc struct matching the gc argument and
// result frame. The gcc struct will be compiled with
// __attribute__((packed)) so all padding must be accounted
// for explicitly.
// Construct a struct that will be used to communicate
// arguments from C to Go. The C and Go definitions
// just have to agree. The gcc struct will be compiled
// with __attribute__((packed)) so all padding must be
// accounted for explicitly.
ctype := "struct {\n"
gotype := new(bytes.Buffer)
fmt.Fprintf(gotype, "struct {\n")
off := int64(0)
npad := 0
if fn.Recv != nil {
t := p.cgoType(fn.Recv.List[0].Type)
ctype += fmt.Sprintf("\t\t%s recv;\n", t.C)
argField := func(typ ast.Expr, namePat string, args ...interface{}) {
name := fmt.Sprintf(namePat, args...)
t := p.cgoType(typ)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s %s;\n", t.C, name)
fmt.Fprintf(gotype, "\t\t%s ", name)
noSourceConf.Fprint(gotype, fset, typ)
fmt.Fprintf(gotype, "\n")
off += t.Size
}
if fn.Recv != nil {
argField(fn.Recv.List[0].Type, "recv")
}
fntype := fn.Type
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s p%d;\n", t.C, i)
off += t.Size
argField(atype, "p%d", i)
})
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
t := p.cgoType(atype)
if off%t.Align != 0 {
pad := t.Align - off%t.Align
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
ctype += fmt.Sprintf("\t\t%s r%d;\n", t.C, i)
off += t.Size
argField(atype, "r%d", i)
})
if off%p.PtrSize != 0 {
pad := p.PtrSize - off%p.PtrSize
ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
off += pad
npad++
}
if ctype == "struct {\n" {
ctype += "\t\tchar unused;\n" // avoid empty struct
}
ctype += "\t}"
fmt.Fprintf(gotype, "\t}")
// Get the return type of the wrapper function
// compiled by gcc.
@@ -939,7 +928,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
}
// Build the wrapper function compiled by gcc.
s := fmt.Sprintf("%s %s(", gccResult, exp.ExpName)
gccExport := ""
if goos == "windows" {
gccExport = "__declspec(dllexport)"
}
s := fmt.Sprintf("%s %s %s(", gccExport, gccResult, exp.ExpName)
if fn.Recv != nil {
s += p.cgoType(fn.Recv.List[0].Type).C.String()
s += " recv"
@@ -961,12 +954,15 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
}
fmt.Fprintf(fgcch, "extern %s;\n", s)
fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *, int, __SIZE_TYPE__);\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *);\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgcc, "\nCGO_NO_SANITIZE_THREAD")
fmt.Fprintf(fgcc, "\n%s\n", s)
fmt.Fprintf(fgcc, "{\n")
fmt.Fprintf(fgcc, "\t__SIZE_TYPE__ _cgo_ctxt = _cgo_wait_runtime_init_done();\n")
fmt.Fprintf(fgcc, "\t%s %v _cgo_a;\n", ctype, p.packedAttribute())
// The results part of the argument structure must be
// initialized to 0 so the write barriers generated by
// the assignments to these fields in Go are safe.
fmt.Fprintf(fgcc, "\t%s %v _cgo_a = {0};\n", ctype, p.packedAttribute())
if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
fmt.Fprintf(fgcc, "\t%s r;\n", gccResult)
}
@@ -995,82 +991,28 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprintf(fgcc, "}\n")
// Build the wrapper function compiled by cmd/compile.
goname := "_cgoexpwrap" + cPrefix + "_"
if fn.Recv != nil {
goname += fn.Recv.List[0].Names[0].Name + "_"
}
goname += exp.Func.Name.Name
// This unpacks the argument struct above and calls the Go function.
fmt.Fprintf(fgo2, "//go:cgo_export_dynamic %s\n", exp.ExpName)
fmt.Fprintf(fgo2, "//go:linkname _cgoexp%s_%s _cgoexp%s_%s\n", cPrefix, exp.ExpName, cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:nosplit\n") // no split stack, so no use of m or g
fmt.Fprintf(fgo2, "//go:norace\n") // must not have race detector calls inserted
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32, ctxt uintptr) {\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "\tfn := %s\n", goname)
// The indirect here is converting from a Go function pointer to a C function pointer.
fmt.Fprintf(fgo2, "\t_cgo_runtime_cgocallback(**(**unsafe.Pointer)(unsafe.Pointer(&fn)), a, uintptr(n), ctxt);\n")
fmt.Fprintf(fgo2, "}\n")
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a *%s) {\n", cPrefix, exp.ExpName, gotype)
fmt.Fprintf(fm, "int _cgoexp%s_%s;\n", cPrefix, exp.ExpName)
// This code uses printer.Fprint, not conf.Fprint,
// because we don't want //line comments in the middle
// of the function types.
fmt.Fprintf(fgo2, "\n")
fmt.Fprintf(fgo2, "func %s(", goname)
comma := false
if fn.Recv != nil {
fmt.Fprintf(fgo2, "recv ")
printer.Fprint(fgo2, fset, fn.Recv.List[0].Type)
comma = true
}
forFieldList(fntype.Params,
func(i int, aname string, atype ast.Expr) {
if comma {
fmt.Fprintf(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d ", i)
printer.Fprint(fgo2, fset, atype)
comma = true
})
fmt.Fprintf(fgo2, ")")
if gccResult != "void" {
fmt.Fprint(fgo2, " (")
// Write results back to frame.
fmt.Fprintf(fgo2, "\t")
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
fmt.Fprintf(fgo2, ", ")
}
fmt.Fprintf(fgo2, "r%d ", i)
printer.Fprint(fgo2, fset, atype)
fmt.Fprintf(fgo2, "a.r%d", i)
})
fmt.Fprint(fgo2, ")")
}
fmt.Fprint(fgo2, " {\n")
if gccResult == "void" {
fmt.Fprint(fgo2, "\t")
} else {
// Verify that any results don't contain any
// Go pointers.
addedDefer := false
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if !p.hasPointer(nil, atype, false) {
return
}
if !addedDefer {
fmt.Fprint(fgo2, "\tdefer func() {\n")
addedDefer = true
}
fmt.Fprintf(fgo2, "\t\t_cgoCheckResult(r%d)\n", i)
})
if addedDefer {
fmt.Fprint(fgo2, "\t}()\n")
}
fmt.Fprint(fgo2, "\treturn ")
fmt.Fprintf(fgo2, " = ")
}
if fn.Recv != nil {
fmt.Fprintf(fgo2, "recv.")
fmt.Fprintf(fgo2, "a.recv.")
}
fmt.Fprintf(fgo2, "%s(", exp.Func.Name)
forFieldList(fntype.Params,
@@ -1078,9 +1020,20 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
if i > 0 {
fmt.Fprint(fgo2, ", ")
}
fmt.Fprintf(fgo2, "p%d", i)
fmt.Fprintf(fgo2, "a.p%d", i)
})
fmt.Fprint(fgo2, ")\n")
if gccResult != "void" {
// Verify that any results don't contain any
// Go pointers.
forFieldList(fntype.Results,
func(i int, aname string, atype ast.Expr) {
if !p.hasPointer(nil, atype, false) {
return
}
fmt.Fprintf(fgo2, "\t_cgoCheckResult(a.r%d)\n", i)
})
}
fmt.Fprint(fgo2, "}\n")
}
@@ -1578,9 +1531,6 @@ const goProlog = `
//go:linkname _cgo_runtime_cgocall runtime.cgocall
func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32
//go:linkname _cgo_runtime_cgocallback runtime.cgocallback
func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr, uintptr)
//go:linkname _cgoCheckPointer runtime.cgoCheckPointer
func _cgoCheckPointer(interface{}, interface{})


@@ -133,10 +133,8 @@ var knownFormats = map[string]string{
"cmd/compile/internal/ssa.GCNode %v": "",
"cmd/compile/internal/ssa.ID %d": "",
"cmd/compile/internal/ssa.ID %v": "",
"cmd/compile/internal/ssa.LocPair %s": "",
"cmd/compile/internal/ssa.LocalSlot %s": "",
"cmd/compile/internal/ssa.LocalSlot %v": "",
"cmd/compile/internal/ssa.Location %T": "",
"cmd/compile/internal/ssa.Location %s": "",
"cmd/compile/internal/ssa.Op %s": "",
"cmd/compile/internal/ssa.Op %v": "",


@@ -42,10 +42,11 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
// loadByType returns the load instruction of the given type.
func loadByType(t *types.Type) obj.As {
// Avoid partial register write
if !t.IsFloat() && t.Size() <= 2 {
if t.Size() == 1 {
if !t.IsFloat() {
switch t.Size() {
case 1:
return x86.AMOVBLZX
} else {
case 2:
return x86.AMOVWLZX
}
}
@@ -1070,7 +1071,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
val := v.AuxInt
// 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc
if val != 0 && val != 1 && val != 2 && val != 3 {
if val < 0 || val > 3 {
v.Fatalf("Invalid rounding mode")
}
p.From.Offset = val
@@ -1210,7 +1211,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock:
case ssa.OpAMD64ANDBlock, ssa.OpAMD64ANDLlock, ssa.OpAMD64ORBlock, ssa.OpAMD64ORLlock:
s.Prog(x86.ALOCK)
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG


@@ -688,15 +688,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p5.To.Reg = out
gc.Patch(p2, p5)
case ssa.OpARM64LoweredAtomicAnd8,
ssa.OpARM64LoweredAtomicOr8:
// LDAXRB (Rarg0), Rout
ssa.OpARM64LoweredAtomicAnd32,
ssa.OpARM64LoweredAtomicOr8,
ssa.OpARM64LoweredAtomicOr32:
// LDAXRB/LDAXRW (Rarg0), Rout
// AND/OR Rarg1, Rout
// STLXRB Rout, (Rarg0), Rtmp
// STLXRB/STLXRW Rout, (Rarg0), Rtmp
// CBNZ Rtmp, -3(PC)
ld := arm64.ALDAXRB
st := arm64.ASTLXRB
if v.Op == ssa.OpARM64LoweredAtomicAnd32 || v.Op == ssa.OpARM64LoweredAtomicOr32 {
ld = arm64.ALDAXRW
st = arm64.ASTLXRW
}
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
p := s.Prog(arm64.ALDAXRB)
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
@@ -706,7 +714,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = out
p2 := s.Prog(arm64.ASTLXRB)
p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = out
p2.To.Type = obj.TYPE_MEM


@@ -529,6 +529,10 @@ func geneq(t *types.Type) *obj.LSym {
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname)
nr := asNode(tfn.Type.Results().Field(0).Nname)
// Label to jump to if an equality test fails.
neq := autolabel(".neq")
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
@@ -555,13 +559,13 @@ func geneq(t *types.Type) *obj.LSym {
// for i := 0; i < nelem; i++ {
// if eq(p[i], q[i]) {
// } else {
// return
// goto neq
// }
// }
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
checkAll := func(unroll int64, eq func(pi, qi *Node) *Node) {
checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i *Node) *Node {
// pi := p[i]
@@ -576,23 +580,21 @@ func geneq(t *types.Type) *obj.LSym {
}
if nelem <= unroll {
if last {
// Do last comparison in a different manner.
nelem--
}
// Generate a series of checks.
var cond *Node
for i := int64(0); i < nelem; i++ {
c := nodintconst(i)
check := checkIdx(c)
if cond == nil {
cond = check
continue
}
cond = nod(OANDAND, cond, check)
}
nif := nod(OIF, cond, nil)
nif.Rlist.Append(nod(ORETURN, nil, nil))
// if check {} else { goto neq }
nif := nod(OIF, checkIdx(nodintconst(i)), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(nif)
return
}
if last {
fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
}
} else {
// Generate a for loop.
// for i := 0; i < nelem; i++
i := temp(types.Types[TINT])
@@ -601,12 +603,15 @@ func geneq(t *types.Type) *obj.LSym {
post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
loop := nod(OFOR, cond, post)
loop.Ninit.Append(init)
// if eq(pi, qi) {} else { return }
check := checkIdx(i)
nif := nod(OIF, check, nil)
nif.Rlist.Append(nod(ORETURN, nil, nil))
// if eq(pi, qi) {} else { goto neq }
nif := nod(OIF, checkIdx(i), nil)
nif.Rlist.Append(nodSym(OGOTO, nil, neq))
loop.Nbody.Append(nif)
fn.Nbody.Append(loop)
if last {
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
}
}
}
switch t.Elem().Etype {
@@ -614,32 +619,28 @@ func geneq(t *types.Type) *obj.LSym {
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
checkAll(3, func(pi, qi *Node) *Node {
checkAll(3, false, func(pi, qi *Node) *Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
checkAll(1, func(pi, qi *Node) *Node {
checkAll(1, true, func(pi, qi *Node) *Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case TFLOAT32, TFLOAT64:
checkAll(2, func(pi, qi *Node) *Node {
checkAll(2, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
checkAll(1, func(pi, qi *Node) *Node {
checkAll(1, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
}
// return true
ret := nod(ORETURN, nil, nil)
ret.List.Append(nodbool(true))
fn.Nbody.Append(ret)
case TSTRUCT:
// Build a list of conditions to satisfy.
@@ -717,20 +718,40 @@ func geneq(t *types.Type) *obj.LSym {
flatConds = append(flatConds, c...)
}
var cond *Node
if len(flatConds) == 0 {
cond = nodbool(true)
fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
} else {
cond = flatConds[0]
for _, c := range flatConds[1:] {
cond = nod(OANDAND, cond, c)
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
n := nod(OIF, c, nil)
n.Rlist.Append(nodSym(OGOTO, nil, neq))
fn.Nbody.Append(n)
}
fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
}
}
ret := nod(ORETURN, nil, nil)
ret.List.Append(cond)
fn.Nbody.Append(ret)
// ret:
// return
ret := autolabel(".ret")
fn.Nbody.Append(nodSym(OLABEL, nil, ret))
fn.Nbody.Append(nod(ORETURN, nil, nil))
// neq:
// r = false
// return (or goto ret)
fn.Nbody.Append(nodSym(OLABEL, nil, neq))
fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
if EqCanPanic(t) || hasCall(fn) {
// Epilogue is large, so share it with the equal case.
fn.Nbody.Append(nodSym(OGOTO, nil, ret))
} else {
// Epilogue is small, so don't bother sharing.
fn.Nbody.Append(nod(ORETURN, nil, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
if Debug.r != 0 {
dumplist("geneq body", fn.Nbody)
@@ -762,6 +783,39 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
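As an illustration of the control flow geneq now builds (a sketch of the generated shape, not actual compiler output; names are illustrative), the equality function for an element type like [2]float64 looks roughly like this, with the last comparison assigning the result and all earlier failures branching to the shared neq label:

	func eq(p, q *[2]float64) (r bool) {
		// if check {} else { goto neq }
		if p[0] == q[0] {
		} else {
			goto neq
		}
		r = p[1] == q[1] // last comparison writes the result directly
	ret:
		return
	neq:
		r = false
		goto ret // or a plain return when the epilogue is small
	}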
func hasCall(n *Node) bool {
if n.Op == OCALL || n.Op == OCALLFUNC {
return true
}
if n.Left != nil && hasCall(n.Left) {
return true
}
if n.Right != nil && hasCall(n.Right) {
return true
}
for _, x := range n.Ninit.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Nbody.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.List.Slice() {
if hasCall(x) {
return true
}
}
for _, x := range n.Rlist.Slice() {
if hasCall(x) {
return true
}
}
return false
}
// eqfield returns the node
// p.field == q.field
func eqfield(p *Node, q *Node, field *types.Sym) *Node {


@@ -44,6 +44,7 @@ var runtimeDecls = [...]struct {
{"printcomplex", funcTag, 27},
{"printstring", funcTag, 29},
{"printpointer", funcTag, 30},
{"printuintptr", funcTag, 31},
{"printiface", funcTag, 30},
{"printeface", funcTag, 30},
{"printslice", funcTag, 30},
@@ -51,134 +52,134 @@ var runtimeDecls = [...]struct {
{"printsp", funcTag, 9},
{"printlock", funcTag, 9},
{"printunlock", funcTag, 9},
{"concatstring2", funcTag, 33},
{"concatstring3", funcTag, 34},
{"concatstring4", funcTag, 35},
{"concatstring5", funcTag, 36},
{"concatstrings", funcTag, 38},
{"cmpstring", funcTag, 39},
{"intstring", funcTag, 42},
{"slicebytetostring", funcTag, 43},
{"slicebytetostringtmp", funcTag, 44},
{"slicerunetostring", funcTag, 47},
{"stringtoslicebyte", funcTag, 49},
{"stringtoslicerune", funcTag, 52},
{"slicecopy", funcTag, 53},
{"decoderune", funcTag, 54},
{"countrunes", funcTag, 55},
{"convI2I", funcTag, 56},
{"convT16", funcTag, 57},
{"convT32", funcTag, 57},
{"convT64", funcTag, 57},
{"convTstring", funcTag, 57},
{"convTslice", funcTag, 57},
{"convT2E", funcTag, 58},
{"convT2Enoptr", funcTag, 58},
{"convT2I", funcTag, 58},
{"convT2Inoptr", funcTag, 58},
{"assertE2I", funcTag, 56},
{"assertE2I2", funcTag, 59},
{"assertI2I", funcTag, 56},
{"assertI2I2", funcTag, 59},
{"panicdottypeE", funcTag, 60},
{"panicdottypeI", funcTag, 60},
{"panicnildottype", funcTag, 61},
{"ifaceeq", funcTag, 63},
{"efaceeq", funcTag, 63},
{"fastrand", funcTag, 65},
{"makemap64", funcTag, 67},
{"makemap", funcTag, 68},
{"makemap_small", funcTag, 69},
{"mapaccess1", funcTag, 70},
{"mapaccess1_fast32", funcTag, 71},
{"mapaccess1_fast64", funcTag, 71},
{"mapaccess1_faststr", funcTag, 71},
{"mapaccess1_fat", funcTag, 72},
{"mapaccess2", funcTag, 73},
{"mapaccess2_fast32", funcTag, 74},
{"mapaccess2_fast64", funcTag, 74},
{"mapaccess2_faststr", funcTag, 74},
{"mapaccess2_fat", funcTag, 75},
{"mapassign", funcTag, 70},
{"mapassign_fast32", funcTag, 71},
{"mapassign_fast32ptr", funcTag, 71},
{"mapassign_fast64", funcTag, 71},
{"mapassign_fast64ptr", funcTag, 71},
{"mapassign_faststr", funcTag, 71},
{"mapiterinit", funcTag, 76},
{"mapdelete", funcTag, 76},
{"mapdelete_fast32", funcTag, 77},
{"mapdelete_fast64", funcTag, 77},
{"mapdelete_faststr", funcTag, 77},
{"mapiternext", funcTag, 78},
{"mapclear", funcTag, 79},
{"makechan64", funcTag, 81},
{"makechan", funcTag, 82},
{"chanrecv1", funcTag, 84},
{"chanrecv2", funcTag, 85},
{"chansend1", funcTag, 87},
{"concatstring2", funcTag, 34},
{"concatstring3", funcTag, 35},
{"concatstring4", funcTag, 36},
{"concatstring5", funcTag, 37},
{"concatstrings", funcTag, 39},
{"cmpstring", funcTag, 40},
{"intstring", funcTag, 43},
{"slicebytetostring", funcTag, 44},
{"slicebytetostringtmp", funcTag, 45},
{"slicerunetostring", funcTag, 48},
{"stringtoslicebyte", funcTag, 50},
{"stringtoslicerune", funcTag, 53},
{"slicecopy", funcTag, 54},
{"decoderune", funcTag, 55},
{"countrunes", funcTag, 56},
{"convI2I", funcTag, 57},
{"convT16", funcTag, 58},
{"convT32", funcTag, 58},
{"convT64", funcTag, 58},
{"convTstring", funcTag, 58},
{"convTslice", funcTag, 58},
{"convT2E", funcTag, 59},
{"convT2Enoptr", funcTag, 59},
{"convT2I", funcTag, 59},
{"convT2Inoptr", funcTag, 59},
{"assertE2I", funcTag, 57},
{"assertE2I2", funcTag, 60},
{"assertI2I", funcTag, 57},
{"assertI2I2", funcTag, 60},
{"panicdottypeE", funcTag, 61},
{"panicdottypeI", funcTag, 61},
{"panicnildottype", funcTag, 62},
{"ifaceeq", funcTag, 64},
{"efaceeq", funcTag, 64},
{"fastrand", funcTag, 66},
{"makemap64", funcTag, 68},
{"makemap", funcTag, 69},
{"makemap_small", funcTag, 70},
{"mapaccess1", funcTag, 71},
{"mapaccess1_fast32", funcTag, 72},
{"mapaccess1_fast64", funcTag, 72},
{"mapaccess1_faststr", funcTag, 72},
{"mapaccess1_fat", funcTag, 73},
{"mapaccess2", funcTag, 74},
{"mapaccess2_fast32", funcTag, 75},
{"mapaccess2_fast64", funcTag, 75},
{"mapaccess2_faststr", funcTag, 75},
{"mapaccess2_fat", funcTag, 76},
{"mapassign", funcTag, 71},
{"mapassign_fast32", funcTag, 72},
{"mapassign_fast32ptr", funcTag, 72},
{"mapassign_fast64", funcTag, 72},
{"mapassign_fast64ptr", funcTag, 72},
{"mapassign_faststr", funcTag, 72},
{"mapiterinit", funcTag, 77},
{"mapdelete", funcTag, 77},
{"mapdelete_fast32", funcTag, 78},
{"mapdelete_fast64", funcTag, 78},
{"mapdelete_faststr", funcTag, 78},
{"mapiternext", funcTag, 79},
{"mapclear", funcTag, 80},
{"makechan64", funcTag, 82},
{"makechan", funcTag, 83},
{"chanrecv1", funcTag, 85},
{"chanrecv2", funcTag, 86},
{"chansend1", funcTag, 88},
{"closechan", funcTag, 30},
{"writeBarrier", varTag, 89},
{"typedmemmove", funcTag, 90},
{"typedmemclr", funcTag, 91},
{"typedslicecopy", funcTag, 92},
{"selectnbsend", funcTag, 93},
{"selectnbrecv", funcTag, 94},
{"selectnbrecv2", funcTag, 96},
{"selectsetpc", funcTag, 97},
{"selectgo", funcTag, 98},
{"writeBarrier", varTag, 90},
{"typedmemmove", funcTag, 91},
{"typedmemclr", funcTag, 92},
{"typedslicecopy", funcTag, 93},
{"selectnbsend", funcTag, 94},
{"selectnbrecv", funcTag, 95},
{"selectnbrecv2", funcTag, 97},
{"selectsetpc", funcTag, 98},
{"selectgo", funcTag, 99},
{"block", funcTag, 9},
{"makeslice", funcTag, 99},
{"makeslice64", funcTag, 100},
{"makeslicecopy", funcTag, 101},
{"growslice", funcTag, 103},
{"memmove", funcTag, 104},
{"memclrNoHeapPointers", funcTag, 105},
{"memclrHasPointers", funcTag, 105},
{"memequal", funcTag, 106},
{"memequal0", funcTag, 107},
{"memequal8", funcTag, 107},
{"memequal16", funcTag, 107},
{"memequal32", funcTag, 107},
{"memequal64", funcTag, 107},
{"memequal128", funcTag, 107},
{"f32equal", funcTag, 108},
{"f64equal", funcTag, 108},
{"c64equal", funcTag, 108},
{"c128equal", funcTag, 108},
{"strequal", funcTag, 108},
{"interequal", funcTag, 108},
{"nilinterequal", funcTag, 108},
{"memhash", funcTag, 109},
{"memhash0", funcTag, 110},
{"memhash8", funcTag, 110},
{"memhash16", funcTag, 110},
{"memhash32", funcTag, 110},
{"memhash64", funcTag, 110},
{"memhash128", funcTag, 110},
{"f32hash", funcTag, 110},
{"f64hash", funcTag, 110},
{"c64hash", funcTag, 110},
{"c128hash", funcTag, 110},
{"strhash", funcTag, 110},
{"interhash", funcTag, 110},
{"nilinterhash", funcTag, 110},
{"int64div", funcTag, 111},
{"uint64div", funcTag, 112},
{"int64mod", funcTag, 111},
{"uint64mod", funcTag, 112},
{"float64toint64", funcTag, 113},
{"float64touint64", funcTag, 114},
{"float64touint32", funcTag, 115},
{"int64tofloat64", funcTag, 116},
{"uint64tofloat64", funcTag, 117},
{"uint32tofloat64", funcTag, 118},
{"complex128div", funcTag, 119},
{"racefuncenter", funcTag, 120},
{"makeslice", funcTag, 100},
{"makeslice64", funcTag, 101},
{"makeslicecopy", funcTag, 102},
{"growslice", funcTag, 104},
{"memmove", funcTag, 105},
{"memclrNoHeapPointers", funcTag, 106},
{"memclrHasPointers", funcTag, 106},
{"memequal", funcTag, 107},
{"memequal0", funcTag, 108},
{"memequal8", funcTag, 108},
{"memequal16", funcTag, 108},
{"memequal32", funcTag, 108},
{"memequal64", funcTag, 108},
{"memequal128", funcTag, 108},
{"f32equal", funcTag, 109},
{"f64equal", funcTag, 109},
{"c64equal", funcTag, 109},
{"c128equal", funcTag, 109},
{"strequal", funcTag, 109},
{"interequal", funcTag, 109},
{"nilinterequal", funcTag, 109},
{"memhash", funcTag, 110},
{"memhash0", funcTag, 111},
{"memhash8", funcTag, 111},
{"memhash16", funcTag, 111},
{"memhash32", funcTag, 111},
{"memhash64", funcTag, 111},
{"memhash128", funcTag, 111},
{"f32hash", funcTag, 111},
{"f64hash", funcTag, 111},
{"c64hash", funcTag, 111},
{"c128hash", funcTag, 111},
{"strhash", funcTag, 111},
{"interhash", funcTag, 111},
{"nilinterhash", funcTag, 111},
{"int64div", funcTag, 112},
{"uint64div", funcTag, 113},
{"int64mod", funcTag, 112},
{"uint64mod", funcTag, 113},
{"float64toint64", funcTag, 114},
{"float64touint64", funcTag, 115},
{"float64touint32", funcTag, 116},
{"int64tofloat64", funcTag, 117},
{"uint64tofloat64", funcTag, 118},
{"uint32tofloat64", funcTag, 119},
{"complex128div", funcTag, 120},
{"racefuncenter", funcTag, 31},
{"racefuncenterfp", funcTag, 9},
{"racefuncexit", funcTag, 9},
{"raceread", funcTag, 120},
{"racewrite", funcTag, 120},
{"raceread", funcTag, 31},
{"racewrite", funcTag, 31},
{"racereadrange", funcTag, 121},
{"racewriterange", funcTag, 121},
{"msanread", funcTag, 121},
@@ -233,96 +234,96 @@ func runtimeTypes() []*types.Type {
typs[28] = types.Types[TSTRING]
typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
typs[31] = types.NewArray(typs[0], 32)
typs[32] = types.NewPtr(typs[31])
typs[33] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[34] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[35] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[36] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[37] = types.NewSlice(typs[28])
typs[38] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[37])}, []*Node{anonfield(typs[28])})
typs[39] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[40] = types.NewArray(typs[0], 4)
typs[41] = types.NewPtr(typs[40])
typs[42] = functype(nil, []*Node{anonfield(typs[41]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
typs[43] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[44] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[45] = types.Runetype
typs[46] = types.NewSlice(typs[45])
typs[47] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[46])}, []*Node{anonfield(typs[28])})
typs[48] = types.NewSlice(typs[0])
typs[49] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28])}, []*Node{anonfield(typs[48])})
typs[50] = types.NewArray(typs[45], 32)
typs[51] = types.NewPtr(typs[50])
typs[52] = functype(nil, []*Node{anonfield(typs[51]), anonfield(typs[28])}, []*Node{anonfield(typs[46])})
typs[53] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
typs[54] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[45]), anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[57] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[58] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[61] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[62] = types.NewPtr(typs[5])
typs[63] = functype(nil, []*Node{anonfield(typs[62]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[64] = types.Types[TUINT32]
typs[65] = functype(nil, nil, []*Node{anonfield(typs[64])})
typs[66] = types.NewMap(typs[2], typs[2])
typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
typs[69] = functype(nil, nil, []*Node{anonfield(typs[66])})
typs[70] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, nil)
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66])}, nil)
typs[80] = types.NewChan(typs[2], types.Cboth)
typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[80])})
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[80])})
typs[83] = types.NewChan(typs[2], types.Crecv)
typs[84] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, nil)
typs[85] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[86] = types.NewChan(typs[2], types.Csend)
typs[87] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, nil)
typs[88] = types.NewArray(typs[0], 3)
typs[89] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[88]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[93] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[94] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
typs[95] = types.NewPtr(typs[6])
typs[96] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[95]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
typs[97] = functype(nil, []*Node{anonfield(typs[62])}, nil)
typs[98] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[62]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
typs[102] = types.NewSlice(typs[2])
typs[103] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[102]), anonfield(typs[15])}, []*Node{anonfield(typs[102])})
typs[104] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[105] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[108] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[111] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
typs[112] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
typs[113] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[64])})
typs[116] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
typs[117] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[20])})
typs[119] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[120] = functype(nil, []*Node{anonfield(typs[5])}, nil)
typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
typs[46] = types.Runetype
typs[47] = types.NewSlice(typs[46])
typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[65] = types.Types[TUINT32]
typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[123] = types.NewSlice(typs[7])
@@ -331,7 +332,7 @@ func runtimeTypes() []*types.Type {
typs[126] = functype(nil, []*Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[TUINT16]
typs[128] = functype(nil, []*Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
typs[129] = functype(nil, []*Node{anonfield(typs[64]), anonfield(typs[64])}, nil)
typs[129] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[130] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}


@@ -54,6 +54,7 @@ func printuint(uint64)
func printcomplex(complex128)
func printstring(string)
func printpointer(any)
func printuintptr(uintptr)
func printiface(any)
func printeface(any)
func printslice(any)


@@ -0,0 +1,273 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/obj"
"encoding/json"
"io/ioutil"
"log"
"path"
"sort"
"strconv"
"strings"
)
var embedlist []*Node
var embedCfg struct {
Patterns map[string][]string
Files map[string]string
}
func readEmbedCfg(file string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-embedcfg: %v", err)
}
if err := json.Unmarshal(data, &embedCfg); err != nil {
log.Fatalf("%s: %v", file, err)
}
if embedCfg.Patterns == nil {
log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
}
if embedCfg.Files == nil {
log.Fatalf("%s: invalid embedcfg: missing Files", file)
}
}
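For orientation: the -embedcfg file is JSON matching the struct above, e.g. (paths hypothetical) {"Patterns": {"static/*": ["static/index.html"]}, "Files": {"static/index.html": "/tmp/build/static/index.html"}} — the build system maps each //go:embed pattern to the files it matched, and each file to its location on disk.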
const (
embedUnknown = iota
embedBytes
embedString
embedFiles
)
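These kinds correspond to the three forms of //go:embed variable the compiler accepts; a minimal sketch (file names hypothetical):

	import "embed"

	//go:embed hello.txt
	var s string // embedString

	//go:embed hello.txt
	var b []byte // embedBytes

	//go:embed static/*
	var content embed.FS // embedFiles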
var numLocalEmbed int
func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) (newExprs []*Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
if !ok {
// imports always come first
break
}
path, _ := strconv.Unquote(imp.Path.Value)
if path == "embed" {
haveEmbed = true
break
}
}
pos := embeds[0].Pos
if !haveEmbed {
p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
return exprs
}
if embedCfg.Patterns == nil {
p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration")
return exprs
}
if len(names) > 1 {
p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
return exprs
}
if len(exprs) > 0 {
p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
return exprs
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
p.yyerrorpos(pos, "go:embed cannot apply to var without type")
return exprs
}
kind := embedKindApprox(typ)
if kind == embedUnknown {
p.yyerrorpos(pos, "go:embed cannot apply to var of type %v", typ)
return exprs
}
// Build list of files to store.
have := make(map[string]bool)
var list []string
for _, e := range embeds {
for _, pattern := range e.Patterns {
files, ok := embedCfg.Patterns[pattern]
if !ok {
p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
}
for _, file := range files {
if embedCfg.Files[file] == "" {
p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file)
continue
}
if !have[file] {
have[file] = true
list = append(list, file)
}
if kind == embedFiles {
for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
have[dir] = true
list = append(list, dir+"/")
}
}
}
}
}
sort.Slice(list, func(i, j int) bool {
return embedFileLess(list[i], list[j])
})
if kind == embedString || kind == embedBytes {
if len(list) > 1 {
p.yyerrorpos(pos, "invalid go:embed: multiple files for type %v", typ)
return exprs
}
}
v := names[0]
if dclcontext != PEXTERN {
numLocalEmbed++
v = newnamel(v.Pos, lookupN("embed.", numLocalEmbed))
v.Sym.Def = asTypesNode(v)
v.Name.Param.Ntype = typ
v.SetClass(PEXTERN)
externdcl = append(externdcl, v)
exprs = []*Node{v}
}
v.Name.Param.SetEmbedFiles(list)
embedlist = append(embedlist, v)
return exprs
}
// embedKindApprox determines the kind of embedding variable, approximately.
// The match is approximate because we haven't done scope resolution yet and
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
func embedKindApprox(typ *Node) int {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
return embedFiles
}
// These are not guaranteed to match only string and []byte -
// maybe the local package has redefined one of those words.
// But it's the best we can do now during the noder.
// The stricter check happens later, in initEmbed calling embedKind.
if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == localpkg {
return embedString
}
if typ.Op == OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == localpkg {
return embedBytes
}
return embedUnknown
}
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
return embedFiles
}
if typ == types.Types[TSTRING] {
return embedString
}
if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype {
return embedBytes
}
return embedUnknown
}
func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
if name[len(name)-1] == '/' {
isDir = true
name = name[:len(name)-1]
}
i := len(name) - 1
for i >= 0 && name[i] != '/' {
i--
}
if i < 0 {
return ".", name, isDir
}
return name[:i], name[i+1:], isDir
}
// embedFileLess implements the sort order for a list of embedded files.
// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
func embedFileLess(x, y string) bool {
xdir, xelem, _ := embedFileNameSplit(x)
ydir, yelem, _ := embedFileNameSplit(y)
return xdir < ydir || xdir == ydir && xelem < yelem
}
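To see the resulting order, here is a self-contained sketch that reuses the split/compare logic above outside the compiler (sample paths are made up):

package main

import (
	"fmt"
	"sort"
)

// split mirrors embedFileNameSplit: a trailing "/" marks a directory entry.
func split(name string) (dir, elem string, isDir bool) {
	if name[len(name)-1] == '/' {
		isDir = true
		name = name[:len(name)-1]
	}
	i := len(name) - 1
	for i >= 0 && name[i] != '/' {
		i--
	}
	if i < 0 {
		return ".", name, isDir
	}
	return name[:i], name[i+1:], isDir
}

// less mirrors embedFileLess: order by (directory, element).
func less(x, y string) bool {
	xdir, xelem, _ := split(x)
	ydir, yelem, _ := split(y)
	return xdir < ydir || xdir == ydir && xelem < yelem
}

func main() {
	list := []string{"a/b/c.txt", "a/", "a/b/", "a/d.txt"}
	sort.Slice(list, func(i, j int) bool { return less(list[i], list[j]) })
	fmt.Println(list) // [a/ a/b/ a/d.txt a/b/c.txt]
}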
func dumpembeds() {
for _, v := range embedlist {
initEmbed(v)
}
}
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v *Node) {
files := v.Name.Param.EmbedFiles()
switch kind := embedKind(v.Type); kind {
case embedUnknown:
yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
case embedString, embedBytes:
file := files[0]
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
}
sym := v.Sym.Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
if kind == embedBytes {
duintptr(sym, off, uint64(size)) // cap for slice
}
case embedFiles:
slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
off = duintptr(slicedata, off, uint64(len(files)))
off = duintptr(slicedata, off, uint64(len(files)))
// embed/embed.go type file is:
// name string
// data string
// hash [16]byte
// Emit one of these per file in the set.
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
off = duintptr(slicedata, off, 0)
off = duintptr(slicedata, off, 0)
off += hashSize
} else {
fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
if err != nil {
yyerrorl(v.Pos, "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
sym := v.Sym.Linksym()
dsymptr(sym, 0, slicedata, 0)
}
}
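The per-file records written in the embedFiles case decode into the layout noted in the comment above; a sketch of the equivalent Go struct (the real declaration lives in embed/embed.go):

// Sketch of one record in the "".NAME.files slice emitted above.
type file struct {
	name string   // file string, with trailing "/" for directories
	data string   // contents; empty for directories
	hash [16]byte // SHA-256 prefix, filled in by fileStringSym
}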

View file

@@ -419,10 +419,17 @@ func (n *Node) format(s fmt.State, verb rune, mode fmtMode) {
func (n *Node) jconv(s fmt.State, flag FmtFlag) {
c := flag & FmtShort
// Useful to see which nodes in an AST printout are actually identical
fmt.Fprintf(s, " p(%p)", n)
if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
}
if c == 0 && n.Name != nil && n.Name.Defn != nil {
// Useful to see where Defn is set and what node it points to
fmt.Fprintf(s, " defn(%p)", n.Name.Defn)
}
if n.Pos.IsKnown() {
pfx := ""
switch n.Pos.IsStmt() {
@@ -492,6 +499,15 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
if n.Name.Assigned() {
fmt.Fprint(s, " assigned")
}
if n.Name.IsClosureVar() {
fmt.Fprint(s, " closurevar")
}
if n.Name.Captured() {
fmt.Fprint(s, " captured")
}
if n.Name.IsOutputParamHeapAddr() {
fmt.Fprint(s, " outputparamheapaddr")
}
}
if n.Bounded() {
fmt.Fprint(s, " bounded")
@@ -1710,6 +1726,9 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
}
}
if n.Op == OCLOSURE && n.Func.Closure != nil && n.Func.Closure.Func.Nname.Sym != nil {
mode.Fprintf(s, " fnName %v", n.Func.Closure.Func.Nname.Sym)
}
if n.Sym != nil && n.Op != ONAME {
mode.Fprintf(s, " %v", n.Sym)
}
@@ -1725,6 +1744,16 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
if n.Right != nil {
mode.Fprintf(s, "%v", n.Right)
}
if n.Func != nil && n.Func.Closure != nil && n.Func.Closure.Nbody.Len() != 0 {
indent(s)
// The function associated with a closure
mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Closure)
}
if n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 {
indent(s)
// The dcls for a func or closure
mode.Fprintf(s, "%v-dcl%v", n.Op, asNodes(n.Func.Dcl))
}
if n.List.Len() != 0 {
indent(s)
mode.Fprintf(s, "%v-list%v", n.Op, n.List)

View file

@@ -70,12 +70,8 @@ func newProgs(fn *Node, worker int) *Progs {
pp.pos = fn.Pos
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, -1, false}
if go115ReduceLiveness {
pp.prevLive = LivenessIndex{-1, false}
pp.nextLive = pp.prevLive
} else {
pp.nextLive = LivenessInvalid
}
return pp
}
@@ -120,21 +116,6 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog {
Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
}
if !go115ReduceLiveness {
if pp.nextLive.isUnsafePoint {
// Unsafe points are encoded as a special value in the
// register map.
pp.nextLive.regMapIndex = objabi.PCDATA_RegMapUnsafe
}
if pp.nextLive.regMapIndex != pp.prevLive.regMapIndex {
// Emit register map index change.
idx := pp.nextLive.regMapIndex
pp.prevLive.regMapIndex = idx
p := pp.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_RegMapIndex)
Addrconst(&p.To, int64(idx))
}
} else {
if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
// Emit unsafe-point marker.
pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
@@ -146,7 +127,6 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog {
Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
}
}
}
p := pp.next
pp.next = pp.NewProg()

View file

@@ -257,21 +257,39 @@ func inlFlood(n *Node) {
typecheckinl(n)
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool {
switch n.Op {
case ONAME:
// Mark any referenced global variables or
// functions for reexport. Skip methods,
// because they're reexported alongside their
// receiver type.
if n.Class() == PEXTERN || n.Class() == PFUNC && !n.isMethodExpression() {
switch n.Class() {
case PFUNC:
if n.isMethodExpression() {
inlFlood(asNode(n.Type.Nname()))
} else {
inlFlood(n)
exportsym(n)
}
case PEXTERN:
exportsym(n)
}
case OCALLFUNC, OCALLMETH:
// Recursively flood any functions called by
// this one.
inlFlood(asNode(n.Left.Type.Nname()))
case ODOTMETH:
fn := asNode(n.Type.Nname())
inlFlood(fn)
case OCALLPART:
// Okay, because we don't yet inline indirect
// calls to method values.
case OCLOSURE:
// If the closure is inlinable, we'll need to
// flood it too. But today we don't support
// inlining functions that contain closures.
//
// When we do, we'll probably want:
// inlFlood(n.Func.Closure.Func.Nname)
Fatalf("unexpected closure in inlinable function")
}
return true
})
@@ -574,13 +592,11 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
switch n.Op {
// inhibit inlining of their argument
case ODEFER, OGO:
switch n.Left.Op {
case OCALLFUNC, OCALLMETH:
n.Left.SetNoInline(true)
}
return n
// TODO do them here (or earlier),
// so escape analysis can avoid more heapmoves.
@@ -708,7 +724,14 @@ func inlCallee(fn *Node) *Node {
switch {
case fn.Op == ONAME && fn.Class() == PFUNC:
if fn.isMethodExpression() {
return asNode(fn.Sym.Def)
n := asNode(fn.Type.Nname())
// Check that receiver type matches fn.Left.
// TODO(mdempsky): Handle implicit dereference
// of pointer receiver argument?
if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) {
return nil
}
return n
}
return fn
case fn.Op == OCLOSURE:
@@ -721,6 +744,11 @@ func inlCallee(fn *Node) *Node {
func staticValue(n *Node) *Node {
for {
if n.Op == OCONVNOP {
n = n.Left
continue
}
n1 := staticValue1(n)
if n1 == nil {
return n
@@ -811,14 +839,12 @@ func (v *reassignVisitor) visit(n *Node) *Node {
if n.Left == v.name && n != v.name.Name.Defn {
return n
}
return nil
case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE:
for _, p := range n.List.Slice() {
if p == v.name && n != v.name.Name.Defn {
return n
}
}
return nil
}
if a := v.visit(n.Left); a != nil {
return a
@@ -1011,15 +1037,28 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
}
nreturns := 0
inspectList(asNodes(fn.Func.Inl.Body), func(n *Node) bool {
if n != nil && n.Op == ORETURN {
nreturns++
}
return true
})
// We can delay declaring+initializing result parameters if:
// (1) there's only one "return" statement in the inlined
// function, and (2) the result parameters aren't named.
delayretvars := nreturns == 1
// temporaries for return values.
var retvars []*Node
for i, t := range fn.Type.Results().Fields().Slice() {
var m *Node
mpos := t.Pos
if n := asNode(t.Nname); n != nil && !n.isBlank() {
if n := asNode(t.Nname); n != nil && !n.isBlank() && !strings.HasPrefix(n.Sym.Name, "~r") {
m = inlvar(n)
m = typecheck(m, ctxExpr)
inlvars[n] = m
delayretvars = false // found a named result parameter
} else {
// Anonymous return values: synthesize names for use in the assignment that replaces the return.
m = retvar(t, i)
@@ -1031,12 +1070,11 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// were not part of the original callee.
if !strings.HasPrefix(m.Sym.Name, "~R") {
m.Name.SetInlFormal(true)
m.Pos = mpos
m.Pos = t.Pos
inlfvars = append(inlfvars, m)
}
}
ninit.Append(nod(ODCL, m, nil))
retvars = append(retvars, m)
}
@@ -1097,12 +1135,15 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
ninit.Append(vas)
}
if !delayretvars {
// Zero the return parameters.
for _, n := range retvars {
ninit.Append(nod(ODCL, n, nil))
ras := nod(OAS, n, nil)
ras = typecheck(ras, ctxStmt)
ninit.Append(ras)
}
}
retlabel := autolabel(".i")
@@ -1134,6 +1175,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
subst := inlsubst{
retlabel: retlabel,
retvars: retvars,
delayretvars: delayretvars,
inlvars: inlvars,
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: newIndex,
@@ -1232,6 +1274,10 @@ type inlsubst struct {
// Temporary result variables.
retvars []*Node
// Whether result variables should be initialized at the
// "return" statement.
delayretvars bool
inlvars map[*Node]*Node
// bases maps from original PosBase to PosBase with an extra
@@ -1300,6 +1346,14 @@ func (subst *inlsubst) node(n *Node) *Node {
as.List.Append(n)
}
as.Rlist.Set(subst.list(n.List))
if subst.delayretvars {
for _, n := range as.List.Slice() {
as.Ninit.Append(nod(ODCL, n, nil))
n.Name.Defn = as
}
}
as = typecheck(as, ctxStmt)
m.Ninit.Append(as)
}
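As a sketch of when the delayretvars logic above applies: it needs exactly one return statement and no named result parameters, e.g.:

package p

// Single return, unnamed result: declaration and zeroing of the result
// temporary can be delayed to the return statement itself.
func add(a, b int) int { return a + b }

// Named result: delayretvars is cleared, so the temporary is declared
// and zeroed up front as in the !delayretvars branch above.
func addNamed(a, b int) (sum int) { sum = a + b; return }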
@@ -1362,3 +1416,68 @@ func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node {
}
return s
}
// devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
func devirtualize(fn *Node) {
Curfn = fn
inspectList(fn.Nbody, func(n *Node) bool {
if n.Op == OCALLINTER {
devirtualizeCall(n)
}
return true
})
}
func devirtualizeCall(call *Node) {
recv := staticValue(call.Left.Left)
if recv.Op != OCONVIFACE {
return
}
typ := recv.Left.Type
if typ.IsInterface() {
return
}
x := nodl(call.Left.Pos, ODOTTYPE, call.Left.Left, nil)
x.Type = typ
x = nodlSym(call.Left.Pos, OXDOT, x, call.Left.Sym)
x = typecheck(x, ctxExpr|ctxCallee)
switch x.Op {
case ODOTMETH:
if Debug.m != 0 {
Warnl(call.Pos, "devirtualizing %v to %v", call.Left, typ)
}
call.Op = OCALLMETH
call.Left = x
case ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
if Debug.m != 0 {
Warnl(call.Pos, "partially devirtualizing %v to %v", call.Left, typ)
}
call.Op = OCALLINTER
call.Left = x
default:
// TODO(mdempsky): Turn back into Fatalf after more testing.
if Debug.m != 0 {
Warnl(call.Pos, "failed to devirtualize %v (%v)", x, x.Op)
}
return
}
// Duplicated logic from typecheck for function call return
// value types.
//
// Receiver parameter size may have changed; need to update
// call.Type to get correct stack offsets for result
// parameters.
checkwidth(x.Type)
switch ft := x.Type; ft.NumResults() {
case 0:
case 1:
call.Type = ft.Results().Field(0).Type
default:
call.Type = ft.Results()
}
}
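At the source level the pass applies to calls like the following sketch, where staticValue can see the concrete type behind the interface:

package main

import (
	"bytes"
	"io"
)

func main() {
	var w io.Writer = new(bytes.Buffer) // concrete type statically known
	// With the pass above, this compiles as a direct (*bytes.Buffer).Write
	// call; -gcflags=-m reports "devirtualizing w.Write to *bytes.Buffer".
	w.Write([]byte("hello"))
}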

View file

@@ -51,6 +51,7 @@ func TestIntendedInlining(t *testing.T) {
"funcPC",
"getArgInfoFast",
"getm",
"getMCache",
"isDirectIface",
"itabHashFunc",
"noescape",

View file

@@ -34,8 +34,6 @@ import (
"strings"
)
var imported_unsafe bool
var (
buildid string
spectre string
@@ -241,6 +239,7 @@ func Main(archInit func(*Arch)) {
flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg)
objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
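The embedcfg file is JSON; a hypothetical payload matching the embedCfg.Patterns and embedCfg.Files lookups used by varEmbed above (field names inferred from those uses, so treat this as an assumption):

const exampleEmbedCfg = `{
	"Patterns": {"hello.txt": ["hello.txt"]},
	"Files": {"hello.txt": "/path/to/pkg/hello.txt"}
}`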
@@ -605,7 +604,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top1")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias) {
if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) {
xtop[i] = typecheck(n, ctxStmt)
}
}
@@ -617,7 +616,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top2")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias {
if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() {
xtop[i] = typecheck(n, ctxStmt)
}
}
@@ -710,6 +709,13 @@ func Main(archInit func(*Arch)) {
})
}
for _, n := range xtop {
if n.Op == ODCLFUNC {
devirtualize(n)
}
}
Curfn = nil
// Phase 6: Escape analysis.
// Required for moving heap allocations onto stack,
// which in turn is required by the closure implementation,
@@ -1185,7 +1191,6 @@ func importfile(f *Val) *types.Pkg {
}
if path_ == "unsafe" {
imported_unsafe = true
return unsafepkg
}

View file

@@ -12,6 +12,7 @@ import (
"runtime"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"cmd/compile/internal/importer"
@@ -152,7 +153,11 @@ func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
} else {
// line directive base
p0 := b0.Pos()
p1 := src.MakePos(p.makeSrcPosBase(p0.Base()), p0.Line(), p0.Col())
p0b := p0.Base()
if p0b == b0 {
panic("infinite recursion in makeSrcPosBase")
}
p1 := src.MakePos(p.makeSrcPosBase(p0b), p0.Line(), p0.Col())
b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col())
}
p.basemap[b0] = b1
@@ -197,6 +202,8 @@ type noder struct {
pragcgobuf [][]string
err chan syntax.Error
scope ScopeID
importedUnsafe bool
importedEmbed bool
// scopeVars is a stack tracking the number of variables declared in the
// current function at the moment each open scope was opened.
@@ -298,7 +305,8 @@ type linkname struct {
func (p *noder) node() {
types.Block = 1
imported_unsafe = false
p.importedUnsafe = false
p.importedEmbed = false
p.setlineno(p.file.PkgName)
mkpackage(p.file.PkgName.Value)
@@ -311,7 +319,7 @@ func (p *noder) node() {
xtop = append(xtop, p.decls(p.file.DeclList)...)
for _, n := range p.linknames {
if !imported_unsafe {
if !p.importedUnsafe {
p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
@@ -386,7 +394,6 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
val := p.basicLit(imp.Path)
ipkg := importfile(&val)
if ipkg == nil {
if nerrors == 0 {
Fatalf("phase error in import")
@@ -394,6 +401,13 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
return
}
if ipkg == unsafepkg {
p.importedUnsafe = true
}
if ipkg.Path == "embed" {
p.importedEmbed = true
}
ipkg.Direct = true
var my *types.Sym
@@ -435,6 +449,20 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
}
if pragma, ok := decl.Pragma.(*Pragma); ok {
if len(pragma.Embeds) > 0 {
if !p.importedEmbed {
// This check can't be done when building the list pragma.Embeds
// because that list is created before the noder starts walking over the file,
// so at that point it hasn't seen the imports.
// We're left to check now, just before applying the //go:embed lines.
for _, e := range pragma.Embeds {
p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
}
} else {
exprs = varEmbed(p, names, typ, exprs, pragma.Embeds)
}
pragma.Embeds = nil
}
p.checkUnused(pragma)
}
@@ -517,17 +545,17 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
param := n.Name.Param
param.Ntype = typ
param.Alias = decl.Alias
param.SetAlias(decl.Alias)
if pragma, ok := decl.Pragma.(*Pragma); ok {
if !decl.Alias {
param.Pragma = pragma.Flag & TypePragmas
param.SetPragma(pragma.Flag & TypePragmas)
pragma.Flag &^= TypePragmas
}
p.checkUnused(pragma)
}
nod := p.nod(decl, ODCLTYPE, n, nil)
if param.Alias && !langSupported(1, 9, localpkg) {
if param.Alias() && !langSupported(1, 9, localpkg) {
yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9")
}
return nod
@@ -1555,6 +1583,7 @@ var allowedStdPragmas = map[string]bool{
"go:cgo_import_dynamic": true,
"go:cgo_ldflag": true,
"go:cgo_dynamic_linker": true,
"go:embed": true,
"go:generate": true,
}
@@ -1562,6 +1591,7 @@ var allowedStdPragmas = map[string]bool{
type Pragma struct {
Flag PragmaFlag // collected bits
Pos []PragmaPos // position of each individual flag
Embeds []PragmaEmbed
}
type PragmaPos struct {
@@ -1569,12 +1599,22 @@ type PragmaPos struct {
Pos syntax.Pos
}
type PragmaEmbed struct {
Pos syntax.Pos
Patterns []string
}
func (p *noder) checkUnused(pragma *Pragma) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
p.yyerrorpos(pos.Pos, "misplaced compiler directive")
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
p.yyerrorpos(e.Pos, "misplaced go:embed directive")
}
}
}
func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
@@ -1583,6 +1623,11 @@ func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
}
}
}
// pragma is called concurrently if files are parsed concurrently.
@@ -1627,6 +1672,17 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
}
p.linknames = append(p.linknames, linkname{pos, f[1], target})
case text == "go:embed", strings.HasPrefix(text, "go:embed "):
args, err := parseGoEmbed(text[len("go:embed"):])
if err != nil {
p.error(syntax.Error{Pos: pos, Msg: err.Error()})
}
if len(args) == 0 {
p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
break
}
pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args})
case strings.HasPrefix(text, "go:cgo_import_dynamic "):
// This is permitted for general use because Solaris
// code relies on it in golang.org/x/sys/unix and others.
@@ -1699,3 +1755,64 @@ func mkname(sym *types.Sym) *Node {
}
return n
}
// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
// go/build/read.go also processes these strings and contains similar logic.
func parseGoEmbed(args string) ([]string, error) {
var list []string
for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
var path string
Switch:
switch args[0] {
default:
i := len(args)
for j, c := range args {
if unicode.IsSpace(c) {
i = j
break
}
}
path = args[:i]
args = args[i:]
case '`':
i := strings.Index(args[1:], "`")
if i < 0 {
return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
}
path = args[1 : 1+i]
args = args[1+i+1:]
case '"':
i := 1
for ; i < len(args); i++ {
if args[i] == '\\' {
i++
continue
}
if args[i] == '"' {
q, err := strconv.Unquote(args[:i+1])
if err != nil {
return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
}
path = q
args = args[i+1:]
break Switch
}
}
if i >= len(args) {
return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
}
}
if args != "" {
r, _ := utf8.DecodeRuneInString(args)
if !unicode.IsSpace(r) {
return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
}
}
list = append(list, path)
}
return list, nil
}
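All three argument spellings the parser accepts can be mixed on one line; a sketch (pattern names are made up):

package p

import "embed"

// Unquoted, double-quoted (full Go string syntax), and back-quoted
// patterns are all handled by parseGoEmbed above.
//
//go:embed images/* "name with spaces.txt" `raw.txt`
var content embed.FS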

View file

@@ -14,6 +14,8 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
)
@@ -125,6 +127,7 @@ func dumpdata() {
itabsLen := len(itabs)
dumpimportstrings()
dumpbasictypes()
dumpembeds()
// Calls to dumpsignats can generate functions,
// like method wrappers and hash and equality routines.
@@ -309,7 +312,7 @@ func addGCLocals() {
if fn == nil {
continue
}
for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals, fn.GCRegs} {
for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
if gcsym != nil && !gcsym.OnList() {
ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
}
@@ -358,28 +361,31 @@ func dbvec(s *obj.LSym, off int, bv bvec) int {
return off
}
const (
stringSymPrefix = "go.string."
stringSymPattern = ".gostring.%d.%x"
)
// stringsym returns a symbol containing the string s.
// The symbol contains the string data, not a string header.
func stringsym(pos src.XPos, s string) (data *obj.LSym) {
var symname string
if len(s) > 100 {
// Huge strings are hashed to avoid long names in object files.
// Indulge in some paranoia by writing the length of s, too,
// as protection against length extension attacks.
// Same pattern is known to fileStringSym below.
h := sha256.New()
io.WriteString(h, s)
symname = fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
} else {
// Small strings get named directly by their contents.
symname = strconv.Quote(s)
}
const prefix = "go.string."
symdataname := prefix + symname
symdata := Ctxt.Lookup(symdataname)
symdata := Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
// string data
off := dsname(symdata, 0, s, pos, "string")
off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
@@ -387,26 +393,122 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) {
return symdata
}
var slicebytes_gen int
// fileStringSym returns a symbol for the contents and the size of file.
// If readonly is true, the symbol shares storage with any literal string
// or other file with the same content and is placed in a read-only section.
// If readonly is false, the symbol is a read-write copy separate from any other,
// for use as the backing store of a []byte.
// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
// The returned symbol contains the data itself, not a string header.
func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
f, err := os.Open(file)
if err != nil {
return nil, 0, err
}
defer f.Close()
info, err := f.Stat()
if err != nil {
return nil, 0, err
}
if !info.Mode().IsRegular() {
return nil, 0, fmt.Errorf("not a regular file")
}
size := info.Size()
if size <= 1*1024 {
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, 0, err
}
if int64(len(data)) != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
var sym *obj.LSym
if readonly {
sym = stringsym(pos, string(data))
} else {
sym = slicedata(pos, string(data)).Sym.Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
copy(hash, sum[:])
}
return sym, size, nil
}
if size > 2e9 {
// ggloblsym takes an int32,
// and probably the rest of the toolchain
// can't handle such big symbols either.
// See golang.org/issue/9862.
return nil, 0, fmt.Errorf("file too large")
}
func slicebytes(nam *Node, s string) {
slicebytes_gen++
symname := fmt.Sprintf(".gobytes.%d", slicebytes_gen)
// File is too big to read and keep in memory.
// Compute hash if needed for read-only content hashing or if the caller wants it.
var sum []byte
if readonly || len(hash) > 0 {
h := sha256.New()
n, err := io.Copy(h, f)
if err != nil {
return nil, 0, err
}
if n != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
sum = h.Sum(nil)
copy(hash, sum)
}
var symdata *obj.LSym
if readonly {
symname := fmt.Sprintf(stringSymPattern, size, sum)
symdata = Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
// Note: AttrContentAddressable cannot be set here,
// because the content-addressable-handling code
// does not know about file symbols.
}
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
symdata = slicedata(pos, "").Sym.Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
}
return symdata, size, nil
}
var slicedataGen int
func slicedata(pos src.XPos, s string) *Node {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := localpkg.Lookup(symname)
symnode := newname(sym)
sym.Def = asTypesNode(symnode)
lsym := sym.Linksym()
off := dsname(lsym, 0, s, nam.Pos, "slice")
off := dstringdata(lsym, 0, s, pos, "slice")
ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
return symnode
}
func slicebytes(nam *Node, s string) {
if nam.Op != ONAME {
Fatalf("slicebytes %v", nam)
}
slicesym(nam, symnode, int64(len(s)))
slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
}
func dsname(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// Objects that are too large will cause the data section to overflow right away,
// causing a cryptic error message by the linker. Check for oversize objects here
// and provide a useful error message instead.

View file

@@ -24,16 +24,6 @@ import (
"strings"
)
// go115ReduceLiveness disables register maps and only produces stack
// maps at call sites.
//
// In Go 1.15, we changed debug call injection to use conservative
// scanning instead of precise pointer maps, so these are no longer
// necessary.
//
// Keep in sync with runtime/preempt.go:go115ReduceLiveness.
const go115ReduceLiveness = true
// OpVarDef is an annotation for the liveness analysis, marking a place
// where a complete initialization (definition) of a variable begins.
// Since the liveness analysis can see initialization of single-word
@@ -96,15 +86,15 @@ type BlockEffects struct {
//
// uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block)
uevar varRegVec
varkill varRegVec
uevar bvec
varkill bvec
// Computed during Liveness.solve using control flow information:
//
// livein: variables live at block entry
// liveout: variables live at block exit
livein varRegVec
liveout varRegVec
livein bvec
liveout bvec
}
// A collection of global state used by liveness analysis.
@@ -128,16 +118,14 @@ type Liveness struct {
// current Block during Liveness.epilogue. Indexed in Value
// order for that block. Additionally, for the entry block
// livevars[0] is the entry bitmap. Liveness.compact moves
// these to stackMaps and regMaps.
livevars []varRegVec
// these to stackMaps.
livevars []bvec
// livenessMap maps from safe points (i.e., CALLs) to their
// liveness map indexes.
livenessMap LivenessMap
stackMapSet bvecSet
stackMaps []bvec
regMapSet map[liveRegMask]int
regMaps []liveRegMask
cache progeffectscache
}
@@ -158,7 +146,7 @@ func (m *LivenessMap) reset() {
delete(m.vals, k)
}
}
m.deferreturn = LivenessInvalid
m.deferreturn = LivenessDontCare
}
func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
@@ -166,27 +154,17 @@ func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
}
func (m LivenessMap) Get(v *ssa.Value) LivenessIndex {
if !go115ReduceLiveness {
// All safe-points are in the map, so if v isn't in
// the map, it's an unsafe-point.
if idx, ok := m.vals[v.ID]; ok {
return idx
}
return LivenessInvalid
}
// If v isn't in the map, then it's a "don't care" and not an
// unsafe-point.
if idx, ok := m.vals[v.ID]; ok {
return idx
}
return LivenessIndex{StackMapDontCare, StackMapDontCare, false}
return LivenessIndex{StackMapDontCare, false}
}
// LivenessIndex stores the liveness map information for a Value.
type LivenessIndex struct {
stackMapIndex int
regMapIndex int // only for !go115ReduceLiveness
// isUnsafePoint indicates that this is an unsafe-point.
//
@@ -197,8 +175,10 @@ type LivenessIndex struct {
isUnsafePoint bool
}
// LivenessInvalid indicates an unsafe point with no stack map.
var LivenessInvalid = LivenessIndex{StackMapDontCare, StackMapDontCare, true} // only for !go115ReduceLiveness
// LivenessDontCare indicates that the liveness information doesn't
// matter. Currently it is used in deferreturn liveness when we don't
// actually need it. It should never be emitted to the PCDATA stream.
var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
// StackMapDontCare indicates that the stack map index at a Value
// doesn't matter.
@@ -212,46 +192,12 @@ func (idx LivenessIndex) StackMapValid() bool {
return idx.stackMapIndex != StackMapDontCare
}
func (idx LivenessIndex) RegMapValid() bool {
return idx.regMapIndex != StackMapDontCare
}
type progeffectscache struct {
retuevar []int32
tailuevar []int32
initialized bool
}
// varRegVec contains liveness bitmaps for variables and registers.
type varRegVec struct {
vars bvec
regs liveRegMask
}
func (v *varRegVec) Eq(v2 varRegVec) bool {
return v.vars.Eq(v2.vars) && v.regs == v2.regs
}
func (v *varRegVec) Copy(v2 varRegVec) {
v.vars.Copy(v2.vars)
v.regs = v2.regs
}
func (v *varRegVec) Clear() {
v.vars.Clear()
v.regs = 0
}
func (v *varRegVec) Or(v1, v2 varRegVec) {
v.vars.Or(v1.vars, v2.vars)
v.regs = v1.regs | v2.regs
}
func (v *varRegVec) AndNot(v1, v2 varRegVec) {
v.vars.AndNot(v1.vars, v2.vars)
v.regs = v1.regs &^ v2.regs
}
// livenessShouldTrack reports whether the liveness analysis
// should track the variable n.
// We don't care about variables that have no pointers,
@@ -400,110 +346,6 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
}
}
// regEffects returns the registers affected by v.
func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
if go115ReduceLiveness {
return 0, 0
}
if v.Op == ssa.OpPhi {
// All phi node arguments must come from the same
// register and the result must also go to that
// register, so there's no overall effect.
return 0, 0
}
addLocs := func(mask liveRegMask, v *ssa.Value, ptrOnly bool) liveRegMask {
if int(v.ID) >= len(lv.f.RegAlloc) {
// v has no allocated registers.
return mask
}
loc := lv.f.RegAlloc[v.ID]
if loc == nil {
// v has no allocated registers.
return mask
}
if v.Op == ssa.OpGetG {
// GetG represents the G register, which is a
// pointer, but not a valid GC register. The
// current G is always reachable, so it's okay
// to ignore this register.
return mask
}
// Collect registers and types from v's location.
var regs [2]*ssa.Register
nreg := 0
switch loc := loc.(type) {
case ssa.LocalSlot:
return mask
case *ssa.Register:
if ptrOnly && !v.Type.HasPointers() {
return mask
}
regs[0] = loc
nreg = 1
case ssa.LocPair:
// The value will have TTUPLE type, and the
// children are nil or *ssa.Register.
if v.Type.Etype != types.TTUPLE {
v.Fatalf("location pair %s has non-tuple type %v", loc, v.Type)
}
for i, loc1 := range &loc {
if loc1 == nil {
continue
}
if ptrOnly && !v.Type.FieldType(i).HasPointers() {
continue
}
regs[nreg] = loc1.(*ssa.Register)
nreg++
}
default:
v.Fatalf("weird RegAlloc location: %s (%T)", loc, loc)
}
// Add register locations to vars.
for _, reg := range regs[:nreg] {
if reg.GCNum() == -1 {
if ptrOnly {
v.Fatalf("pointer in non-pointer register %v", reg)
} else {
continue
}
}
mask |= 1 << uint(reg.GCNum())
}
return mask
}
// v clobbers all registers it writes to (whether or not the
// write is pointer-typed).
kill = addLocs(0, v, false)
for _, arg := range v.Args {
// v uses all registers it reads from, but we only
// care about marking those containing pointers.
uevar = addLocs(uevar, arg, true)
}
return uevar, kill
}
type liveRegMask uint32 // only if !go115ReduceLiveness
func (m liveRegMask) niceString(config *ssa.Config) string {
if m == 0 {
return "<none>"
}
str := ""
for i, reg := range config.GCRegMap {
if m&(1<<uint(i)) != 0 {
if str != "" {
str += ","
}
str += reg.String()
}
}
return str
}
type livenessFuncCache struct {
be []BlockEffects
livenessMap LivenessMap
@@ -519,8 +361,6 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
vars: vars,
idx: idx,
stkptrsize: stkptrsize,
regMapSet: make(map[liveRegMask]int),
}
// Significant sources of allocation are kept in the ssa.Cache
@@ -533,7 +373,7 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessInvalid}
lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare}
lc.livenessMap.vals = nil
}
if lv.be == nil {
@@ -546,10 +386,10 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
for _, b := range f.Blocks {
be := lv.blockEffects(b)
be.uevar = varRegVec{vars: bulk.next()}
be.varkill = varRegVec{vars: bulk.next()}
be.livein = varRegVec{vars: bulk.next()}
be.liveout = varRegVec{vars: bulk.next()}
be.uevar = bulk.next()
be.varkill = bulk.next()
be.livein = bulk.next()
be.liveout = bulk.next()
}
lv.livenessMap.reset()
@@ -637,20 +477,6 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
}
}
// usedRegs returns the maximum width of the live register map.
func (lv *Liveness) usedRegs() int32 {
var any liveRegMask
for _, live := range lv.regMaps {
any |= live
}
i := int32(0)
for any != 0 {
any >>= 1
i++
}
return i
}
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
@@ -851,10 +677,6 @@ func (lv *Liveness) markUnsafePoints() {
// particular, call Values can have a stack map in case the callee
// grows the stack, but not themselves be a safe-point.
func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
// The runtime only has safe-points in function prologues, so
// we only need stack maps at call sites. go:nosplit functions
// are similar.
if go115ReduceLiveness || compiling_runtime || lv.f.NoSplit {
if !v.Op.IsCall() {
return false
}
@@ -865,17 +687,6 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
return false
}
return true
}
switch v.Op {
case ssa.OpInitMem, ssa.OpArg, ssa.OpSP, ssa.OpSB,
ssa.OpSelect0, ssa.OpSelect1, ssa.OpGetG,
ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive,
ssa.OpPhi:
// These don't produce code (see genssa).
return false
}
return !lv.unsafePoints.Get(int32(v.ID))
}
// Initializes the sets for solving the live variables. Visits all the
@@ -891,17 +702,13 @@ func (lv *Liveness) prologue() {
// effects with the each prog effects.
for j := len(b.Values) - 1; j >= 0; j-- {
pos, e := lv.valueEffects(b.Values[j])
regUevar, regKill := lv.regEffects(b.Values[j])
if e&varkill != 0 {
be.varkill.vars.Set(pos)
be.uevar.vars.Unset(pos)
be.varkill.Set(pos)
be.uevar.Unset(pos)
}
be.varkill.regs |= regKill
be.uevar.regs &^= regKill
if e&uevar != 0 {
be.uevar.vars.Set(pos)
be.uevar.Set(pos)
}
be.uevar.regs |= regUevar
}
}
}
@@ -911,8 +718,8 @@ func (lv *Liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
// frees within the loop.
nvars := int32(len(lv.vars))
newlivein := varRegVec{vars: bvalloc(nvars)}
newliveout := varRegVec{vars: bvalloc(nvars)}
newlivein := bvalloc(nvars)
newliveout := bvalloc(nvars)
// Walk blocks in postorder ordering. This improves convergence.
po := lv.f.Postorder()
@@ -930,11 +737,11 @@ func (lv *Liveness) solve() {
switch b.Kind {
case ssa.BlockRet:
for _, pos := range lv.cache.retuevar {
newliveout.vars.Set(pos)
newliveout.Set(pos)
}
case ssa.BlockRetJmp:
for _, pos := range lv.cache.tailuevar {
newliveout.vars.Set(pos)
newliveout.Set(pos)
}
case ssa.BlockExit:
// panic exit - nothing to do
@@ -969,7 +776,7 @@ func (lv *Liveness) solve() {
// variables at each safe point locations.
func (lv *Liveness) epilogue() {
nvars := int32(len(lv.vars))
liveout := varRegVec{vars: bvalloc(nvars)}
liveout := bvalloc(nvars)
livedefer := bvalloc(nvars) // always-live variables
// If there is a defer (that could recover), then all output
@@ -1025,12 +832,11 @@ func (lv *Liveness) epilogue() {
{
// Reserve an entry for function entry.
live := bvalloc(nvars)
lv.livevars = append(lv.livevars, varRegVec{vars: live})
lv.livevars = append(lv.livevars, live)
}
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
firstBitmapIndex := len(lv.livevars)
// Walk forward through the basic block instructions and
// allocate liveness maps for those instructions that need them.
@@ -1040,7 +846,7 @@ func (lv *Liveness) epilogue() {
}
live := bvalloc(nvars)
lv.livevars = append(lv.livevars, varRegVec{vars: live})
lv.livevars = append(lv.livevars, live)
}
// walk backward, construct maps at each safe point
@@ -1056,21 +862,18 @@ func (lv *Liveness) epilogue() {
live := &lv.livevars[index]
live.Or(*live, liveout)
live.vars.Or(live.vars, livedefer) // only for non-entry safe points
live.Or(*live, livedefer) // only for non-entry safe points
index--
}
// Update liveness information.
pos, e := lv.valueEffects(v)
regUevar, regKill := lv.regEffects(v)
if e&varkill != 0 {
liveout.vars.Unset(pos)
liveout.Unset(pos)
}
liveout.regs &^= regKill
if e&uevar != 0 {
liveout.vars.Set(pos)
liveout.Set(pos)
}
liveout.regs |= regUevar
}
if b == lv.f.Entry {
@@ -1080,7 +883,7 @@ func (lv *Liveness) epilogue() {
// Check to make sure only input variables are live.
for i, n := range lv.vars {
if !liveout.vars.Get(int32(i)) {
if !liveout.Get(int32(i)) {
continue
}
if n.Class() == PPARAM {
@@ -1094,32 +897,16 @@ func (lv *Liveness) epilogue() {
live.Or(*live, liveout)
}
// Check that no registers are live across calls.
// For closure calls, the CALLclosure is the last use
// of the context register, so it's dead after the call.
index = int32(firstBitmapIndex)
for _, v := range b.Values {
if lv.hasStackMap(v) {
live := lv.livevars[index]
if v.Op.IsCall() && live.regs != 0 {
lv.printDebug()
v.Fatalf("%v register %s recorded as live at call", lv.fn.Func.Nname, live.regs.niceString(lv.f.Config))
}
index++
}
}
// The liveness maps for this block are now complete. Compact them.
lv.compact(b)
}
// If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.Func.OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessInvalid
lv.livenessMap.deferreturn = LivenessDontCare
} else {
lv.livenessMap.deferreturn = LivenessIndex{
stackMapIndex: lv.stackMapSet.add(livedefer),
regMapIndex: 0, // entry regMap, containing no live registers
isUnsafePoint: false,
}
}
@@ -1136,20 +923,10 @@ func (lv *Liveness) epilogue() {
lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n)
}
}
if !go115ReduceLiveness {
// Check that no registers are live at function entry.
// The context register, if any, comes from a
// LoweredGetClosurePtr operation first thing in the function,
// so it doesn't appear live at entry.
if regs := lv.regMaps[0]; regs != 0 {
lv.printDebug()
lv.f.Fatalf("%v register %s recorded as live on entry", lv.fn.Func.Nname, regs.niceString(lv.f.Config))
}
}
}
// Compact coalesces identical bitmaps from lv.livevars into the sets
// lv.stackMapSet and lv.regMaps.
// lv.stackMapSet.
//
// Compact clears lv.livevars.
//
@@ -1165,45 +942,23 @@ func (lv *Liveness) compact(b *ssa.Block) {
// PCDATA tables cost about 100k. So for now we keep using a single index for
// both bitmap lists.
func (lv *Liveness) compact(b *ssa.Block) {
add := func(live varRegVec, isUnsafePoint bool) LivenessIndex { // only if !go115ReduceLiveness
// Deduplicate the stack map.
stackIndex := lv.stackMapSet.add(live.vars)
// Deduplicate the register map.
regIndex, ok := lv.regMapSet[live.regs]
if !ok {
regIndex = len(lv.regMapSet)
lv.regMapSet[live.regs] = regIndex
lv.regMaps = append(lv.regMaps, live.regs)
}
return LivenessIndex{stackIndex, regIndex, isUnsafePoint}
}
pos := 0
if b == lv.f.Entry {
// Handle entry stack map.
if !go115ReduceLiveness {
add(lv.livevars[0], false)
} else {
lv.stackMapSet.add(lv.livevars[0].vars)
}
lv.stackMapSet.add(lv.livevars[0])
pos++
}
for _, v := range b.Values {
if go115ReduceLiveness {
hasStackMap := lv.hasStackMap(v)
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
idx := LivenessIndex{StackMapDontCare, StackMapDontCare, isUnsafePoint}
idx := LivenessIndex{StackMapDontCare, isUnsafePoint}
if hasStackMap {
idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos].vars)
idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
pos++
}
if hasStackMap || isUnsafePoint {
lv.livenessMap.set(v, idx)
}
} else if lv.hasStackMap(v) {
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
lv.livenessMap.set(v, add(lv.livevars[pos], isUnsafePoint))
pos++
}
}
// Reset livevars.
@@ -1250,8 +1005,8 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
Warnl(pos, s)
}
func (lv *Liveness) printbvec(printed bool, name string, live varRegVec) bool {
if live.vars.IsEmpty() && live.regs == 0 {
func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
if live.IsEmpty() {
return printed
}
@@ -1264,19 +1019,18 @@ func (lv *Liveness) printbvec(printed bool, name string, live varRegVec) bool {
comma := ""
for i, n := range lv.vars {
if !live.vars.Get(int32(i)) {
if !live.Get(int32(i)) {
continue
}
fmt.Printf("%s%s", comma, n.Sym.Name)
comma = ","
}
fmt.Printf("%s%s", comma, live.regs.niceString(lv.f.Config))
return true
}
// printeffect is like printbvec, but for valueEffects and regEffects.
func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool, regMask liveRegMask) bool {
if !x && regMask == 0 {
// printeffect is like printbvec, but for valueEffects.
func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
if !x {
return printed
}
if !printed {
@@ -1288,15 +1042,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool, re
if x {
fmt.Printf("%s", lv.vars[pos].Sym.Name)
}
for j, reg := range lv.f.Config.GCRegMap {
if regMask&(1<<uint(j)) != 0 {
if x {
fmt.Printf(",")
}
x = true
fmt.Printf("%v", reg)
}
}
return true
}
@@ -1364,15 +1110,14 @@ func (lv *Liveness) printDebug() {
pcdata := lv.livenessMap.Get(v)
pos, effect := lv.valueEffects(v)
regUevar, regKill := lv.regEffects(v)
printed = false
printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0, regUevar)
printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0, regKill)
printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0)
printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0)
if printed {
fmt.Printf("\n")
}
if pcdata.StackMapValid() || pcdata.RegMapValid() {
if pcdata.StackMapValid() {
fmt.Printf("\tlive=")
printed = false
if pcdata.StackMapValid() {
@@ -1388,16 +1133,6 @@ func (lv *Liveness) printDebug() {
printed = true
}
}
if pcdata.RegMapValid() { // only if !go115ReduceLiveness
regLive := lv.regMaps[pcdata.regMapIndex]
if regLive != 0 {
if printed {
fmt.Printf(",")
}
fmt.Printf("%s", regLive.niceString(lv.f.Config))
printed = true
}
}
fmt.Printf("\n")
}
@@ -1423,7 +1158,7 @@ func (lv *Liveness) printDebug() {
// first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The
// remaining bytes are the raw bitmaps.
func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
@@ -1452,7 +1187,7 @@ func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
maxLocals := lv.stkptrsize
// Temporary symbols for encoding bitmaps.
var argsSymTmp, liveSymTmp, regsSymTmp obj.LSym
var argsSymTmp, liveSymTmp obj.LSym
args := bvalloc(int32(maxArgs / int64(Widthptr)))
aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
@@ -1472,24 +1207,6 @@ func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
loff = dbvec(&liveSymTmp, loff, locals)
}
if !go115ReduceLiveness {
regs := bvalloc(lv.usedRegs())
roff := duint32(&regsSymTmp, 0, uint32(len(lv.regMaps))) // number of bitmaps
roff = duint32(&regsSymTmp, roff, uint32(regs.n)) // number of bits in each bitmap
if regs.n > 32 {
// Our uint32 conversion below won't work.
Fatalf("GP registers overflow uint32")
}
if regs.n > 0 {
for _, live := range lv.regMaps {
regs.Clear()
regs.b[0] = uint32(live)
roff = dbvec(&regsSymTmp, roff, regs)
}
}
}
// Give these LSyms content-addressable names,
// so that they can be de-duplicated.
// This provides significant binary size savings.
@@ -1502,11 +1219,7 @@ func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
lsym.Set(obj.AttrContentAddressable, true)
})
}
if !go115ReduceLiveness {
return makeSym(&argsSymTmp), makeSym(&liveSymTmp), makeSym(&regsSymTmp)
}
// TODO(go115ReduceLiveness): Remove regsSym result
return makeSym(&argsSymTmp), makeSym(&liveSymTmp), nil
return makeSym(&argsSymTmp), makeSym(&liveSymTmp)
}
// Entry pointer for liveness analysis. Solves for the liveness of
@@ -1553,7 +1266,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
// Emit the live pointer map data structures
ls := e.curfn.Func.lsym
fninfo := ls.Func()
fninfo.GCArgs, fninfo.GCLocals, fninfo.GCRegs = lv.emit()
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
@@ -1567,14 +1280,6 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCLocals
if !go115ReduceLiveness {
p = pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_RegPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCRegs
}
return lv.livenessMap
}

View file

@@ -1275,9 +1275,8 @@ func dtypesym(t *types.Type) *obj.LSym {
}
ot = dgopkgpath(lsym, ot, tpkg)
xcount := sort.Search(n, func(i int) bool { return !types.IsExported(m[i].name.Name) })
ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
ot = duintptr(lsym, ot, uint64(xcount))
ot = duintptr(lsym, ot, uint64(n))
ot = duintptr(lsym, ot, uint64(n))
dataAdd := imethodSize() * n
ot = dextratype(lsym, ot, t, dataAdd)
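The sort.Search above computes xcount, the number of exported methods, relying on the method list being sorted with exported names first. A standalone sketch of the same idiom (the predicate is a rough ASCII stand-in for types.IsExported):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Sorted as the compiler sorts methods: exported names first.
	names := []string{"Read", "Write", "close", "flush"}
	xcount := sort.Search(len(names), func(i int) bool {
		c := names[i][0]
		return !(c >= 'A' && c <= 'Z') // first index that is unexported
	})
	fmt.Println(xcount) // 2
}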

View file

@@ -75,8 +75,19 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 {
inspectList(n.Nbody, func(n *Node) bool {
switch n.Op {
case OCALLFUNC, OCALLMETH:
fn := asNode(n.Left.Type.Nname())
case ONAME:
if n.Class() == PFUNC {
if n.isMethodExpression() {
n = asNode(n.Type.Nname())
}
if n != nil && n.Name.Defn != nil {
if m := v.visit(n.Name.Defn); m < min {
min = m
}
}
}
case ODOTMETH:
fn := asNode(n.Type.Nname())
if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
if m := v.visit(fn.Name.Defn); m < min {
min = m

View file

@@ -375,11 +375,6 @@ func readonlystaticname(t *types.Type) *Node {
return n
}
func isLiteral(n *Node) bool {
// Treat nils as zeros rather than literals.
return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
}
func (n *Node) isSimpleName() bool {
return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
}
@@ -404,7 +399,7 @@ const (
func getdyn(n *Node, top bool) initGenType {
switch n.Op {
default:
if isLiteral(n) {
if n.isGoConst() {
return initConst
}
return initDynamic
@ -559,7 +554,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
continue
}
islit := isLiteral(value)
islit := value.isGoConst()
if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
continue
}
@ -732,7 +727,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
continue
}
if vstat != nil && isLiteral(value) { // already set by copy from static value
if vstat != nil && value.isGoConst() { // already set by copy from static value
continue
}

View file

@@ -72,9 +72,9 @@ func initssaconfig() {
deferproc = sysfunc("deferproc")
deferprocStack = sysfunc("deferprocStack")
Deferreturn = sysfunc("deferreturn")
Duffcopy = sysvar("duffcopy") // asm func with special ABI
Duffzero = sysvar("duffzero") // asm func with special ABI
gcWriteBarrier = sysvar("gcWriteBarrier") // asm func with special ABI
Duffcopy = sysfunc("duffcopy")
Duffzero = sysfunc("duffzero")
gcWriteBarrier = sysfunc("gcWriteBarrier")
goschedguarded = sysfunc("goschedguarded")
growslice = sysfunc("growslice")
msanread = sysfunc("msanread")
@@ -105,51 +105,51 @@ func initssaconfig() {
// asm funcs with special ABI
if thearch.LinkArch.Name == "amd64" {
GCWriteBarrierReg = map[int16]*obj.LSym{
x86.REG_AX: sysvar("gcWriteBarrier"),
x86.REG_CX: sysvar("gcWriteBarrierCX"),
x86.REG_DX: sysvar("gcWriteBarrierDX"),
x86.REG_BX: sysvar("gcWriteBarrierBX"),
x86.REG_BP: sysvar("gcWriteBarrierBP"),
x86.REG_SI: sysvar("gcWriteBarrierSI"),
x86.REG_R8: sysvar("gcWriteBarrierR8"),
x86.REG_R9: sysvar("gcWriteBarrierR9"),
x86.REG_AX: sysfunc("gcWriteBarrier"),
x86.REG_CX: sysfunc("gcWriteBarrierCX"),
x86.REG_DX: sysfunc("gcWriteBarrierDX"),
x86.REG_BX: sysfunc("gcWriteBarrierBX"),
x86.REG_BP: sysfunc("gcWriteBarrierBP"),
x86.REG_SI: sysfunc("gcWriteBarrierSI"),
x86.REG_R8: sysfunc("gcWriteBarrierR8"),
x86.REG_R9: sysfunc("gcWriteBarrierR9"),
}
}
if thearch.LinkArch.Family == sys.Wasm {
BoundsCheckFunc[ssa.BoundsIndex] = sysvar("goPanicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = sysvar("goPanicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = sysvar("goPanicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysvar("goPanicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = sysvar("goPanicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysvar("goPanicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = sysvar("goPanicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = sysvar("goPanicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysvar("goPanicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("goPanicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysvar("goPanicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("goPanicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = sysvar("goPanicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = sysvar("goPanicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = sysvar("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = sysvar("goPanicSlice3CU")
BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("goPanicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("goPanicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("goPanicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("goPanicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("goPanicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("goPanicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("goPanicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("goPanicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("goPanicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("goPanicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("goPanicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("goPanicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("goPanicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("goPanicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("goPanicSlice3CU")
} else {
BoundsCheckFunc[ssa.BoundsIndex] = sysvar("panicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = sysvar("panicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = sysvar("panicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = sysvar("panicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = sysvar("panicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = sysvar("panicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicSlice3CU")
BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("panicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("panicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("panicSliceAlen")
BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("panicSliceAlenU")
BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("panicSliceAcap")
BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("panicSliceAcapU")
BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("panicSliceB")
BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("panicSliceBU")
BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("panicSlice3Alen")
BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("panicSlice3AlenU")
BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("panicSlice3Acap")
BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("panicSlice3AcapU")
BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("panicSlice3B")
BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("panicSlice3BU")
BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("panicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("panicSlice3CU")
}
if thearch.LinkArch.PtrSize == 4 {
ExtendCheckFunc[ssa.BoundsIndex] = sysvar("panicExtendIndex")
@@ -409,11 +409,17 @@ func buildssa(fn *Node, worker int) *ssa.Func {
// Generate addresses of local declarations
s.decladdrs = map[*Node]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
for _, n := range fn.Func.Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT:
case PPARAM:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
if n.Class() == PPARAMOUT && s.canSSA(n) {
args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
case PPARAMOUT:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
if s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
@@ -2472,6 +2478,11 @@ func (s *state) expr(n *Node) *ssa.Value {
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OANDNOT:
a := s.expr(n.Left)
b := s.expr(n.Right)
b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
case OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
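The new OANDNOT case builds the result as AND of the left operand with the bitwise complement of the right, matching the language identity sketched here:

package main

import "fmt"

func main() {
	x, y := uint8(0b1011), uint8(0b0110)
	fmt.Println(x&^y, x&(^y)) // 9 9: OANDNOT lowers to OAND(x, OBITNOT(y))
}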
@@ -3541,12 +3552,24 @@ func init() {
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "And",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
@@ -3557,13 +3580,17 @@ func init() {
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
@ -4728,7 +4755,7 @@ func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
s.nilCheck(itab)
itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
}
@ -4888,7 +4915,7 @@ func (s *state) canSSA(n *Node) bool {
if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARMOUT?
// TODO: treat as a PPARAMOUT?
return false
}
return canSSAType(n.Type)
@ -5201,7 +5228,10 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.store(t, left, right)
case t.IsPtrShaped():
// no scalar fields.
if t.IsPtr() && t.Elem().NotInHeap() {
s.store(t, left, right) // see issue 42032
}
// otherwise, no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
@ -5245,6 +5275,9 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
if t.IsPtr() && t.Elem().NotInHeap() {
break // see issue 42032
}
s.store(t, left, right)
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
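Both hunks apply one rule from two sides: a pointer whose pointee type is //go:notinheap can never point into the GC heap, so storeTypeScalars writes it like a plain scalar while storeTypePtrs skips it entirely, eliding the write barrier (issue 42032). Sketch of the shape involved (hypothetical type; the pragma is only accepted in runtime-style packages):

//go:notinheap
type offHeap struct{ next *offHeap }

var p *offHeap // a store of p needs no write barrier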
@ -6232,7 +6265,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe".
s.pp.nextLive = LivenessIndex{-1, -1, allUnsafe(f)}
s.pp.nextLive = LivenessIndex{-1, allUnsafe(f)}
// Emit values in block
thearch.SSAMarkMoves(&s, b)
@ -6892,56 +6925,38 @@ func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(types.Types[TUINT8])
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this string up into two separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
return p, l
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}
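For reference, the two slots mirror a string header's layout: one pointer word followed by one length word. A quick, self-contained sanity check (relies only on the language spec):

package main

import "unsafe"

func main() {
	// A string value is a pointer plus a length, matching the
	// .ptr/.len pseudo-variables created above.
	println(unsafe.Sizeof("") == 2*unsafe.Sizeof(uintptr(0))) // true
}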
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
u := types.Types[TUINTPTR]
t := types.NewPtr(types.Types[TUINT8])
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this interface up into two separate variables.
f := ".itab"
if n.Type.IsEmptyInterface() {
f = ".type"
}
c := e.splitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
d := e.splitSlot(&name, ".data", u.Size(), t)
c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
d := e.SplitSlot(&name, ".data", u.Size(), t)
return c, d
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: u, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(name.Type.Elem())
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this slice up into three separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
return p, l, c
}
// Return the three parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
@ -6949,53 +6964,30 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
} else {
t = types.Types[TFLOAT32]
}
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this complex up into two separate variables.
r := e.splitSlot(&name, ".real", 0, t)
i := e.splitSlot(&name, ".imag", t.Size(), t)
r := e.SplitSlot(&name, ".real", 0, t)
i := e.SplitSlot(&name, ".imag", t.Size(), t)
return r, i
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
var t *types.Type
if name.Type.IsSigned() {
t = types.Types[TINT32]
} else {
t = types.Types[TUINT32]
}
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Split this int64 up into two separate variables.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
}
return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
}
// Return the two parts of the larger variable.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
}
return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
}
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
n := name.N.(*Node)
st := name.Type
ft := st.FieldType(i)
var offset int64
for f := 0; f < i; f++ {
offset += st.FieldType(f).Size()
}
if n.Class() == PAUTO && !n.Name.Addrtaken() {
// Note: the _ field may appear several times. But
// have no fear, identically-named but distinct Autos are
// ok, albeit maybe confusing for a debugger.
return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
}
return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i))
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
@ -7005,19 +6997,23 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
e.Fatalf(n.Pos, "bad array size")
}
et := at.Elem()
if n.Class() == PAUTO && !n.Name.Addrtaken() {
return e.splitSlot(&name, "[0]", 0, et)
}
return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
return e.SplitSlot(&name, "[0]", 0, et)
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
return itabsym(it, offset)
}
// splitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N.(*Node)
if node.Class() != PAUTO || node.Name.Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
n := &Node{
Name: new(Name),

View file

@ -1854,8 +1854,10 @@ func isdirectiface(t *types.Type) bool {
}
switch t.Etype {
case TPTR,
TCHAN,
case TPTR:
// Pointers to notinheap types must be stored indirectly. See issue 42076.
return !t.Elem().NotInHeap()
case TCHAN,
TMAP,
TFUNC,
TUNSAFEPTR:
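Why the TPTR case changed: a direct interface keeps the pointer itself in the interface's data word, and the garbage collector scans that word as a heap pointer. A *T whose T is //go:notinheap must therefore be boxed and stored indirectly, so the data word the GC sees is always a real heap pointer; see issue 42076.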

View file

@ -142,7 +142,7 @@ const (
_, _ // second nodeInitorder bit
_, nodeHasBreak
_, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
_, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP; or ANDNOT lowered to OAND
_, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP
_, nodeIsDDD // is the argument variadic
_, nodeDiag // already printed error about this
_, nodeColas // OAS resulting from :=
@ -480,11 +480,87 @@ type Param struct {
Innermost *Node
Outer *Node
// OTYPE
//
// TODO: Should Func pragmas also be stored on the Name?
Pragma PragmaFlag
Alias bool // node is alias for Ntype (only used when type-checking ODCLTYPE)
// OTYPE & ONAME //go:embed info,
// sharing storage to reduce gc.Param size.
// Extra is nil, or else *Extra is a *paramType or an *embedFileList.
Extra *interface{}
}
type paramType struct {
flag PragmaFlag
alias bool
}
type embedFileList []string
// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
func (p *Param) Pragma() PragmaFlag {
if p.Extra == nil {
return 0
}
return (*p.Extra).(*paramType).flag
}
// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
func (p *Param) SetPragma(flag PragmaFlag) {
if p.Extra == nil {
if flag == 0 {
return
}
p.Extra = new(interface{})
*p.Extra = &paramType{flag: flag}
return
}
(*p.Extra).(*paramType).flag = flag
}
// Alias reports whether p, which must be for an OTYPE, is a type alias.
func (p *Param) Alias() bool {
if p.Extra == nil {
return false
}
t, ok := (*p.Extra).(*paramType)
if !ok {
return false
}
return t.alias
}
// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
func (p *Param) SetAlias(alias bool) {
if p.Extra == nil {
if !alias {
return
}
p.Extra = new(interface{})
*p.Extra = &paramType{alias: alias}
return
}
(*p.Extra).(*paramType).alias = alias
}
// EmbedFiles returns the list of embedded files for p,
// which must be for an ONAME var.
func (p *Param) EmbedFiles() []string {
if p.Extra == nil {
return nil
}
return *(*p.Extra).(*embedFileList)
}
// SetEmbedFiles sets the list of embedded files for p,
// which must be for an ONAME var.
func (p *Param) SetEmbedFiles(list []string) {
if p.Extra == nil {
if len(list) == 0 {
return
}
f := embedFileList(list)
p.Extra = new(interface{})
*p.Extra = &f
return
}
*(*p.Extra).(*embedFileList) = list
}
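The accessors above let a Param carry either OTYPE info (pragma and alias bit) or ONAME //go:embed patterns in one pointer-sized Extra field, never both at once. An in-package usage sketch (illustrative values only):

var typ Param
typ.SetPragma(NotInHeap) // Extra now holds a *paramType
typ.SetAlias(true)       // same *paramType, alias bit set

var embedded Param
embedded.SetEmbedFiles([]string{"static/*"}) // Extra holds an *embedFileList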
// Functions
@ -555,7 +631,7 @@ type Func struct {
Ntype *Node // signature
Top int // top context (ctxCallee, etc)
Closure *Node // OCLOSURE <-> ODCLFUNC
Nname *Node
Nname *Node // The ONAME node associated with an ODCLFUNC (both have same Type)
lsym *obj.LSym
Inl *Inline
@ -697,7 +773,7 @@ const (
OCALLPART // Left.Right (method expression x.Method, not called)
OCAP // cap(Left)
OCLOSE // close(Left)
OCLOSURE // func Type { Body } (func literal)
OCLOSURE // func Type { Func.Closure.Nbody } (func literal)
OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
OMAPLIT // Type{List} (composite literal, Type is map)
OSTRUCTLIT // Type{List} (composite literal, Type is struct)
@ -789,7 +865,12 @@ const (
// statements
OBLOCK // { List } (block of code)
OBREAK // break [Sym]
OCASE // case List: Nbody (List==nil means default)
// OCASE: case List: Nbody (List==nil means default)
// For OTYPESW, List is an OTYPE node for the specified type (or OLITERAL
// for nil), and, if a type-switch variable is specified, Rlist is an
// ONAME for the version of the type-switch variable with the specified
// type.
OCASE
OCONTINUE // continue [Sym]
ODEFER // defer Left (Left must be call)
OEMPTY // no-op (empty statement)
@ -813,14 +894,18 @@ const (
ORETURN // return List
OSELECT // select { List } (List is list of OCASE)
OSWITCH // switch Ninit; Left { List } (List is a list of OCASE)
OTYPESW // Left = Right.(type) (appears as .Left of OSWITCH)
// OTYPESW: Left := Right.(type) (appears as .Left of OSWITCH)
// Left is nil if there is no type-switch variable
OTYPESW
// types
OTCHAN // chan int
OTMAP // map[string]int
OTSTRUCT // struct{}
OTINTER // interface{}
OTFUNC // func()
// OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is
// list of result fields.
OTFUNC
OTARRAY // []int, [8]int, [N]int or [...]int
// misc

View file

@ -257,12 +257,12 @@ func typecheck(n *Node, top int) (res *Node) {
// are substituted.
cycle := cycleFor(n)
for _, n1 := range cycle {
if n1.Name != nil && !n1.Name.Param.Alias {
if n1.Name != nil && !n1.Name.Param.Alias() {
// Cycle is ok. But if n is an alias type and doesn't
// have a type yet, we have a recursive type declaration
// with aliases that we can't handle properly yet.
// Report an error rather than crashing later.
if n.Name != nil && n.Name.Param.Alias && n.Type == nil {
if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
lineno = n.Pos
Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
}
@ -2516,7 +2516,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
n.Left = nod(OADDR, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, ctxType|ctxExpr)
} else if tt.IsPtr() && !rcvr.IsPtr() && types.Identical(tt.Elem(), rcvr) {
} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
n.Left = nod(ODEREF, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, ctxType|ctxExpr)
@ -3504,7 +3504,7 @@ func setUnderlying(t, underlying *types.Type) {
}
// Propagate go:notinheap pragma from the Name to the Type.
if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma&NotInHeap != 0 {
if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 {
t.SetNotInHeap(true)
}
@ -3676,7 +3676,7 @@ func typecheckdef(n *Node) {
n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
case OTYPE:
if p := n.Name.Param; p.Alias {
if p := n.Name.Param; p.Alias() {
// Type alias declaration: Simply use the rhs type - no need
// to create a new type.
// If we have a syntax error, p.Ntype may be nil.

View file

@ -474,7 +474,7 @@ opswitch:
ODEREF, OSPTR, OITAB, OIDATA, OADDR:
n.Left = walkexpr(n.Left, init)
case OEFACE, OAND, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@ -965,14 +965,6 @@ opswitch:
fn := basicnames[param] + "to" + basicnames[result]
n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
case OANDNOT:
n.Left = walkexpr(n.Left, init)
n.Op = OAND
n.SetImplicit(true) // for walkCheckPtrArithmetic
n.Right = nod(OBITNOT, n.Right, nil)
n.Right = typecheck(n.Right, ctxExpr)
n.Right = walkexpr(n.Right, init)
case ODIV, OMOD:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@ -997,7 +989,7 @@ opswitch:
// runtime calls late in SSA processing.
if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
if n.Right.Op == OLITERAL {
// Leave div/mod by constant powers of 2.
// Leave div/mod by constant powers of 2 or small 16-bit constants.
// The SSA backend will handle those.
switch et {
case TINT64:
@ -1010,6 +1002,9 @@ opswitch:
}
case TUINT64:
c := uint64(n.Right.Int64Val())
if c < 1<<16 {
break opswitch
}
if c != 0 && c&(c-1) == 0 {
break opswitch
}
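Effect of the new arm (sketch): on 32-bit targets, a 64-bit division or modulus by any constant below 1<<16 is now left in place for the SSA backend to lower, where previously only constant powers of two escaped the rewrite into a runtime call.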
@ -1965,7 +1960,17 @@ func walkprint(nn *Node, init *Nodes) *Node {
on = syslook("printiface")
}
on = substArgTypes(on, n.Type) // any-1
case TPTR, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
case TPTR:
if n.Type.Elem().NotInHeap() {
on = syslook("printuintptr")
n = nod(OCONV, n, nil)
n.Type = types.Types[TUNSAFEPTR]
n = nod(OCONV, n, nil)
n.Type = types.Types[TUINTPTR]
break
}
fallthrough
case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
on = syslook("printpointer")
on = substArgTypes(on, n.Type) // any-1
case TSLICE:
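// Aside on the TPTR rewrite above (sketch): the two stacked OCONVs produce
// the only conversion chain Go allows from a pointer to an integer,
//	println(uintptr(unsafe.Pointer(p)))
// so a notinheap pointer prints as a bare address instead of going through
// printpointer, whose argument the runtime would treat as a heap pointer.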
@ -3609,14 +3614,20 @@ func bounded(n *Node, max int64) bool {
}
switch n.Op {
case OAND:
case OAND, OANDNOT:
v := int64(-1)
if smallintconst(n.Left) {
switch {
case smallintconst(n.Left):
v = n.Left.Int64Val()
} else if smallintconst(n.Right) {
case smallintconst(n.Right):
v = n.Right.Int64Val()
if n.Op == OANDNOT {
v = ^v
if !sign {
v &= 1<<uint(bits) - 1
}
}
}
if 0 <= v && v < max {
return true
}
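// Worked example for the new OANDNOT arm: with x of type uint8, x &^ 0xF0
// has constant operand v = ^0xF0; masking to the 8-bit width gives 0x0F, so
// the result is provably in [0, 15] and bounds checks against any max > 15
// can be dropped. For signed operands the mask is skipped, and a negative
// ^v simply fails the 0 <= v test.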
@ -4045,12 +4056,8 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
case OADD:
walk(n.Left)
walk(n.Right)
case OSUB:
case OSUB, OANDNOT:
walk(n.Left)
case OAND:
if n.Implicit() { // was OANDNOT
walk(n.Left)
}
case OCONVNOP:
if n.Left.Type.IsUnsafePtr() {
n.Left = cheapexpr(n.Left, init)

View file

@ -213,15 +213,15 @@ func s15a8(x *[15]int64) [15]int64 {
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~r1 = y:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~R0 = y:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y.b (dot of pointer)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~r1 = \u003cN\u003e (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~r1:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~r1) (return)"}]}`)
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u003cN\u003e (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
})
}

View file

@ -166,34 +166,46 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.To.Reg = v.Reg1()
case ssa.OpPPC64LoweredAtomicAnd8,
ssa.OpPPC64LoweredAtomicOr8:
ssa.OpPPC64LoweredAtomicAnd32,
ssa.OpPPC64LoweredAtomicOr8,
ssa.OpPPC64LoweredAtomicOr32:
// LWSYNC
// LBAR (Rarg0), Rtmp
// LBAR/LWAR (Rarg0), Rtmp
// AND/OR Rarg1, Rtmp
// STBCCC Rtmp, (Rarg0)
// STBCCC/STWCCC Rtmp, (Rarg0)
// BNE -3(PC)
ld := ppc64.ALBAR
st := ppc64.ASTBCCC
if v.Op == ssa.OpPPC64LoweredAtomicAnd32 || v.Op == ssa.OpPPC64LoweredAtomicOr32 {
ld = ppc64.ALWAR
st = ppc64.ASTWCCC
}
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
// LWSYNC - Assuming shared data not write-through-required nor
// caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
plwsync := s.Prog(ppc64.ALWSYNC)
plwsync.To.Type = obj.TYPE_NONE
p := s.Prog(ppc64.ALBAR)
// LBAR or LWAR
p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
// AND/OR reg1,out
p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = ppc64.REGTMP
p2 := s.Prog(ppc64.ASTBCCC)
// STBCCC or STWCCC
p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = ppc64.REGTMP
// BNE retry
p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
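// Go-level reading of the emitted sequence (loadReserved/storeConditional
// are hypothetical stand-ins for LBAR/LWAR and STBCCC/STWCCC):
//	for {
//		old := loadReserved(addr)             // into REGTMP
//		if storeConditional(addr, old&mask) { // old|mask for the Or ops
//			break
//		}
//		// BNE -3(PC): retry when the reservation was lost
//	}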
@ -637,6 +649,24 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
// Auxint holds encoded rotate + mask
case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI:
rot, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt)
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.Reg = v.Args[0].Reg()
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(rot)}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)})
// Auxint holds mask
case ssa.OpPPC64RLWNM:
_, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt)
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.Reg = v.Args[0].Reg()
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)})
case ssa.OpPPC64MADDLD:
r := v.Reg()
r1 := v.Args[0].Reg()

View file

@ -25,7 +25,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
return p
}
// TODO(jsing): Add a duff zero implementation for medium sized ranges.
if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
p.Reg = riscv.REG_SP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
return p
}
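// Offset arithmetic, worked through (assuming 8-byte pointers and 8 bytes
// of code per Duffzero step, as the formula implies): Duffzero unrolls 128
// pointer-width stores, so entering at 8*(128 - cnt/8) skips the stores
// that are not needed. For cnt = 64, the offset is 8*(128-8) = 960 and
// exactly the final 8 stores execute.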
// Loop, zeroing pointer width bytes at a time.
// ADD $(off), SP, T0

View file

@ -190,7 +190,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// input args need no code
case ssa.OpPhi:
gc.CheckLoweredPhi(v)
case ssa.OpCopy, ssa.OpRISCV64MOVconvert:
case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
if v.Type.IsMemory() {
return
}
@ -208,6 +208,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = rs
p.To.Type = obj.TYPE_REG
p.To.Reg = rd
case ssa.OpRISCV64MOVDnop:
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
// nothing to do
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString())
@ -228,6 +233,37 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddrAuto(&p.To, v)
case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// nothing to do
case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
a := v.Args[0]
for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
a = a.Args[0]
}
as := v.Op.Asm()
rs := v.Args[0].Reg()
rd := v.Reg()
if a.Op == ssa.OpLoadReg {
t := a.Type
switch {
case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
// arg is a properly-typed load and is already sign/zero-extended
if rs == rd {
return
}
as = riscv.AMOV
default:
}
}
p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = rs
p.To.Type = obj.TYPE_REG
p.To.Reg = rd
case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
@ -572,6 +608,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpRISCV64DUFFZERO:
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpRISCV64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
default:
v.Fatalf("Unhandled op %v", v.Op)
}
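// Example of the new elision (sketch): in
//	var x int8 = *p     // an OpLoadReg of a 1-byte signed type
//	y := int64(x)       // an OpRISCV64MOVBreg
// the load has already sign-extended its result, so the MOVB weakens to a
// plain MOV, and is dropped entirely when rs == rd.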

View file

@ -182,11 +182,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
i := v.Aux.(s390x.RotateParams)
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
p.RestArgs = []obj.Addr{
p.SetRestArgs([]obj.Addr{
{Type: obj.TYPE_CONST, Offset: int64(i.End)},
{Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
{Type: obj.TYPE_REG, Reg: r2},
}
})
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
case ssa.OpS390XADD, ssa.OpS390XADDW,
ssa.OpS390XSUB, ssa.OpS390XSUBW,
@ -761,6 +761,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpS390XLAN, ssa.OpS390XLAO:
// LA(N|O) Ry, TMP, 0(Rx)
op := s.Prog(v.Op.Asm())
op.From.Type = obj.TYPE_REG
op.From.Reg = v.Args[1].Reg()
op.Reg = s390x.REGTMP
op.To.Type = obj.TYPE_MEM
op.To.Reg = v.Args[0].Reg()
case ssa.OpS390XLANfloor, ssa.OpS390XLAOfloor:
r := v.Args[0].Reg() // clobbered, assumed R1 in comments
@ -905,7 +913,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible
p.Reg = s390x.REG_R3
p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: 0}}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 0})
if b.Succs[0].Block() != next {
s.Br(s390x.ABR, b.Succs[0].Block())
}
@ -948,17 +956,17 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
p.Reg = b.Controls[0].Reg()
p.RestArgs = []obj.Addr{{Type: obj.TYPE_REG, Reg: b.Controls[1].Reg()}}
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: b.Controls[1].Reg()})
case ssa.BlockS390XCGIJ, ssa.BlockS390XCIJ:
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
p.Reg = b.Controls[0].Reg()
p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: int64(int8(b.AuxInt))}}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(int8(b.AuxInt))})
case ssa.BlockS390XCLGIJ, ssa.BlockS390XCLIJ:
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
p.Reg = b.Controls[0].Reg()
p.RestArgs = []obj.Addr{{Type: obj.TYPE_CONST, Offset: int64(uint8(b.AuxInt))}}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(uint8(b.AuxInt))})
default:
b.Fatalf("branch not implemented: %s", b.LongString())
}

View file

@ -59,22 +59,22 @@ func addressingModes(f *Func) {
v.AuxInt += p.AuxInt
case [2]auxType{auxSymValAndOff, auxInt32}:
vo := ValAndOff(v.AuxInt)
if !vo.canAdd(p.AuxInt) {
if !vo.canAdd64(p.AuxInt) {
continue
}
v.AuxInt = vo.add(p.AuxInt)
v.AuxInt = int64(vo.addOffset64(p.AuxInt))
case [2]auxType{auxSymValAndOff, auxSymOff}:
vo := ValAndOff(v.AuxInt)
if v.Aux != nil && p.Aux != nil {
continue
}
if !vo.canAdd(p.AuxInt) {
if !vo.canAdd64(p.AuxInt) {
continue
}
if p.Aux != nil {
v.Aux = p.Aux
}
v.AuxInt = vo.add(p.AuxInt)
v.AuxInt = int64(vo.addOffset64(p.AuxInt))
case [2]auxType{auxSymOff, auxNone}:
// nothing to do
case [2]auxType{auxSymValAndOff, auxNone}:
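// Background (sketch of the encoding relied on here): a ValAndOff packs a
// 32-bit value and a 32-bit offset into one int64 AuxInt, value in the high
// half, offset in the low half. canAdd64 reports whether the summed offset
// still fits in an int32, and addOffset64 returns the repacked ValAndOff,
// hence the explicit int64(...) conversion on assignment.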

View file

@ -304,37 +304,39 @@ commas. For example:
`
}
if phase == "check" && flag == "on" {
if phase == "check" {
switch flag {
case "on":
checkEnabled = val != 0
debugPoset = checkEnabled // also turn on advanced self-checking in prove's datastructure
return ""
}
if phase == "check" && flag == "off" {
case "off":
checkEnabled = val == 0
debugPoset = checkEnabled
return ""
}
if phase == "check" && flag == "seed" {
case "seed":
checkEnabled = true
checkRandSeed = val
debugPoset = checkEnabled
return ""
}
}
alltime := false
allmem := false
alldump := false
if phase == "all" {
if flag == "time" {
switch flag {
case "time":
alltime = val != 0
} else if flag == "mem" {
case "mem":
allmem = val != 0
} else if flag == "dump" {
case "dump":
alldump = val != 0
if alldump {
BuildDump = valString
}
} else {
default:
return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
}
}
@ -429,7 +431,7 @@ var passes = [...]pass{
{name: "early copyelim", fn: copyelim},
{name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
{name: "short circuit", fn: shortcircuit},
{name: "decompose args", fn: decomposeArgs, required: true},
{name: "decompose args", fn: decomposeArgs, required: !go116lateCallExpansion, disabled: go116lateCallExpansion}, // handled by late call lowering
{name: "decompose user", fn: decomposeUser, required: true},
{name: "pre-opt deadcode", fn: deadcode},
{name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules
@ -441,8 +443,8 @@ var passes = [...]pass{
{name: "nilcheckelim", fn: nilcheckelim},
{name: "prove", fn: prove},
{name: "early fuse", fn: fuseEarly},
{name: "expand calls", fn: expandCalls, required: true},
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "expand calls", fn: expandCalls, required: true},
{name: "softfloat", fn: softfloat, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
{name: "dead auto elim", fn: elimDeadAutosGeneric},

View file

@ -149,6 +149,7 @@ type Frontend interface {
SplitStruct(LocalSlot, int) LocalSlot
SplitArray(LocalSlot) LocalSlot // array must be length 1
SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo)
SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
// DerefItab dereferences an itab function
// entry, given the symbol of the itab and
@ -198,9 +199,9 @@ const (
const go116lateCallExpansion = true
// LateCallExpansionEnabledWithin returns true if late call expansion should be tested
// within compilation of a function/method triggered by GOSSAHASH (defaults to "yes").
// within compilation of a function/method.
func LateCallExpansionEnabledWithin(f *Func) bool {
return go116lateCallExpansion && f.DebugTest // Currently set up for GOSSAHASH bug searches
return go116lateCallExpansion
}
// NewConfig returns a new configuration object for the given architecture.

View file

@ -6,6 +6,7 @@ package ssa
import (
"cmd/compile/internal/types"
"sort"
)
// decompose converts phi ops on compound builtin types into phi
@ -31,77 +32,79 @@ func decomposeBuiltIn(f *Func) {
}
// Split up named values into their components.
// Accumulate old names for aggregates (that are decomposed) in toDelete for efficient bulk deletion,
// and accumulate new LocalSlots in newNames for addition after the iteration. This decomposition is for
// builtin types with leaf components, so there is no need to reprocess the newly created LocalSlots.
var toDelete []namedVal
var newNames []LocalSlot
for _, name := range f.Names {
for i, name := range f.Names {
t := name.Type
switch {
case t.IsInteger() && t.Size() > f.Config.RegSize:
hiName, loName := f.fe.SplitInt64(name)
newNames = append(newNames, hiName, loName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpInt64Make {
continue
}
f.NamedValues[hiName] = append(f.NamedValues[hiName], v.Args[0])
f.NamedValues[loName] = append(f.NamedValues[loName], v.Args[1])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsComplex():
rName, iName := f.fe.SplitComplex(name)
newNames = append(newNames, rName, iName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpComplexMake {
continue
}
f.NamedValues[rName] = append(f.NamedValues[rName], v.Args[0])
f.NamedValues[iName] = append(f.NamedValues[iName], v.Args[1])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsString():
ptrName, lenName := f.fe.SplitString(name)
newNames = append(newNames, ptrName, lenName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpStringMake {
continue
}
f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0])
f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsSlice():
ptrName, lenName, capName := f.fe.SplitSlice(name)
newNames = append(newNames, ptrName, lenName, capName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpSliceMake {
continue
}
f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0])
f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1])
f.NamedValues[capName] = append(f.NamedValues[capName], v.Args[2])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsInterface():
typeName, dataName := f.fe.SplitInterface(name)
newNames = append(newNames, typeName, dataName)
for _, v := range f.NamedValues[name] {
for j, v := range f.NamedValues[name] {
if v.Op != OpIMake {
continue
}
f.NamedValues[typeName] = append(f.NamedValues[typeName], v.Args[0])
f.NamedValues[dataName] = append(f.NamedValues[dataName], v.Args[1])
toDelete = append(toDelete, namedVal{i, j})
}
delete(f.NamedValues, name)
case t.IsFloat():
// floats are never decomposed, even ones bigger than RegSize
newNames = append(newNames, name)
case t.Size() > f.Config.RegSize:
f.Fatalf("undecomposed named type %s %v", name, t)
default:
newNames = append(newNames, name)
}
}
f.Names = newNames
deleteNamedVals(f, toDelete)
f.Names = append(f.Names, newNames...)
}
func decomposeBuiltInPhi(v *Value) {
@ -263,14 +266,20 @@ func decomposeUserArrayInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalS
f.Fatalf("array not of size 1")
}
elemName := f.fe.SplitArray(name)
var keep []*Value
for _, v := range f.NamedValues[name] {
if v.Op != OpArrayMake1 {
keep = append(keep, v)
continue
}
f.NamedValues[elemName] = append(f.NamedValues[elemName], v.Args[0])
}
if len(keep) == 0 {
// delete the name for the array as a whole
delete(f.NamedValues, name)
} else {
f.NamedValues[name] = keep
}
if t.Elem().IsArray() {
return decomposeUserArrayInto(f, elemName, slots)
@ -300,17 +309,23 @@ func decomposeUserStructInto(f *Func, name LocalSlot, slots []LocalSlot) []Local
}
makeOp := StructMakeOp(n)
var keep []*Value
// create named values for each struct field
for _, v := range f.NamedValues[name] {
if v.Op != makeOp {
keep = append(keep, v)
continue
}
for i := 0; i < len(fnames); i++ {
f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], v.Args[i])
}
}
// remove the name of the struct as a whole
if len(keep) == 0 {
// delete the name for the struct as a whole
delete(f.NamedValues, name)
} else {
f.NamedValues[name] = keep
}
// now that this f.NamedValues contains values for the struct
// fields, recurse into nested structs
@ -400,3 +415,35 @@ func StructMakeOp(nf int) Op {
}
panic("too many fields in an SSAable struct")
}
type namedVal struct {
locIndex, valIndex int // f.NamedValues[f.Names[locIndex]][valIndex] = key
}
// deleteNamedVals removes particular values with debugger names from f's naming data structures
func deleteNamedVals(f *Func, toDelete []namedVal) {
// Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalidate pending indices.
sort.Slice(toDelete, func(i, j int) bool {
if toDelete[i].locIndex != toDelete[j].locIndex {
return toDelete[i].locIndex > toDelete[j].locIndex
}
return toDelete[i].valIndex > toDelete[j].valIndex
})
// Get rid of obsolete names
for _, d := range toDelete {
loc := f.Names[d.locIndex]
vals := f.NamedValues[loc]
l := len(vals) - 1
if l > 0 {
vals[d.valIndex] = vals[l]
f.NamedValues[loc] = vals[:l]
} else {
delete(f.NamedValues, loc)
l = len(f.Names) - 1
f.Names[d.locIndex] = f.Names[l]
f.Names = f.Names[:l]
}
}
}
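// The generic swap-with-end delete used above (illustrative):
//	vals[d.valIndex] = vals[len(vals)-1]
//	vals = vals[:len(vals)-1]
// Visiting indices in decreasing order means each swap moves an element
// down from a position larger than every deletion still pending, so the
// remaining indices stay valid.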

File diff suppressed because it is too large

View file

@ -125,6 +125,10 @@ func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off}
}
func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
return LocalSlot{N: parent.N, Type: t, Off: offset}
}
func (DummyFrontend) Line(_ src.XPos) string {
return "unknown.go:0"
}

View file

@ -310,7 +310,7 @@
(Const32 ...) => (MOVLconst ...)
(Const(32|64)F ...) => (MOVS(S|D)const ...)
(ConstNil) => (MOVLconst [0])
(ConstBool [c]) => (MOVLconst [int32(b2i(c))])
(ConstBool [c]) => (MOVLconst [b2i32(c)])
// Lowering calls
(StaticCall ...) => (CALLstatic ...)
@ -640,31 +640,31 @@
// it compiles to a thunk call).
(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
&& (base.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
&& (base.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOV(L|W|B|SS|SD)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off)
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
&& valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
// Merge load/store to op
((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
@ -679,37 +679,37 @@
// fold LEALs together
(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL [off1+off2] {mergeSymTyped(sym1,sym2)} x)
(LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
// LEAL into LEAL1
(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL1 into LEAL
(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL into LEAL[248]
(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL[248] into LEAL
(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAL[1248] into LEAL[1248]. Only some such merges are possible.
(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
(LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
(LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+2*int64(off2)) =>
(LEAL4 [off1+2*off2] {sym} x y)
(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+4*int64(off2)) =>
@ -993,49 +993,49 @@
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p mem)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p mem)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
(MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p0 mem)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
(MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p0 mem)
=> (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1
&& a.Off() + 2 == c.Off()
&& clobber(x)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p mem)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p mem)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
(MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 2)
&& clobber(x)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p0 mem)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
(MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 2)
&& clobber(x)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p0 mem)
=> (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
// Combine stores into larger (unaligned) stores.
(MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem))

View file

@ -401,7 +401,7 @@
(Const32F ...) => (MOVSSconst ...)
(Const64F ...) => (MOVSDconst ...)
(ConstNil ) => (MOVQconst [0])
(ConstBool [c]) => (MOVLconst [int32(b2i(c))])
(ConstBool [c]) => (MOVLconst [b2i32(c)])
// Lowering calls
(StaticCall ...) => (CALLstatic ...)
@ -531,7 +531,9 @@
// Atomic memory updates.
(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem)
(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
(AtomicOr8 ptr val mem) => (ORBlock ptr val mem)
(AtomicOr32 ptr val mem) => (ORLlock ptr val mem)
// Write barrier.
(WB ...) => (LoweredWB ...)
@ -581,7 +583,7 @@
((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
=> ((ULT|UGE) (BTQconst [int8(log32(c))] x))
((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
=> ((ULT|UGE) (BTQconst [int8(log2(c))] x))
=> ((ULT|UGE) (BTQconst [int8(log64(c))] x))
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
@ -589,7 +591,7 @@
(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
=> (SET(B|AE) (BTQconst [int8(log32(c))] x))
(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
=> (SET(B|AE) (BTQconst [int8(log2(c))] x))
=> (SET(B|AE) (BTQconst [int8(log64(c))] x))
// SET..store variant
(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
@ -600,7 +602,7 @@
(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
=> (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
=> (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log2(c))] x) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
// and further combining shifts.
@ -629,7 +631,7 @@
((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
=> (BT(S|C)Lconst [int8(log32(c))] x)
((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
=> (BT(S|C)Qconst [int8(log2(c))] x)
=> (BT(S|C)Qconst [int8(log64(c))] x)
((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
=> (BT(S|C)Lconst [int8(log32(c))] x)
@ -640,7 +642,7 @@
(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
=> (BTRLconst [int8(log32(^c))] x)
(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
=> (BTRQconst [int8(log2(^c))] x)
=> (BTRQconst [int8(log64(^c))] x)
(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
=> (BTRLconst [int8(log32(^c))] x)
@ -957,7 +959,7 @@
(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [c] x) && isPowerOfTwo(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log2(int64(c)+1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log64(int64(c)+1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x)
@ -1135,80 +1137,80 @@
// what variables are being read/written by the ops.
(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOV(Q|L|W|B)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
(MOV(Q|L|W|B)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOV(Q|L|W|B)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(CMP(Q|L|W|B)load [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
// fold LEAQs together
(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ [off1+off2] {mergeSymTyped(sym1,sym2)} x)
(LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
// LEAQ into LEAQ1
(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ1 into LEAQ
(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ into LEAQ[248]
(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ[248] into LEAQ
(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
// LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
(LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
(LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
(LEAQ4 [off1+2*off2] {sym1} x y)
(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
@ -1998,31 +2000,31 @@
=> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVQload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVQstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVQstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVLstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVQload [off1+off2] {sym} ptr mem)
(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVLload [off1+off2] {sym} ptr mem)
@ -2058,17 +2060,17 @@
(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOV(Q|L|B)atomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
// Merge ADDQconst and LEAQ into atomic stores.
(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(XCHGQ [off1+off2] {sym} val ptr mem)
(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
(XCHGQ [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem)
(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
(XCHGL [off1+off2] {sym} val ptr mem)
(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
(XCHGL [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem)
(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
// Merge ADDQconst into atomic adds.
// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
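For instance (a sketch, assuming atomic.SwapUint64 lowers to XCHGQ on amd64), the constant field offset below can be absorbed into the exchange's displacement by the rules above rather than computed with a separate LEAQ:

    import "sync/atomic"

    type counter struct {
    	pad [8]byte
    	n   uint64 // at constant offset 8 from the struct base
    }

    // &c.n is c plus a constant; the XCHGQ rule folds it in.
    func swap(c *counter, v uint64) uint64 {
    	return atomic.SwapUint64(&c.n, v)
    }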


@ -902,7 +902,9 @@ func init() {
// Atomic memory updates.
{name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
{name: "ANDLlock", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
{name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
{name: "ORLlock", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
}
var AMD64blocks = []blockData{


@ -169,10 +169,10 @@
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
// constants
(Const(8|16|32) ...) -> (MOVWconst ...)
(Const(32F|64F) ...) -> (MOV(F|D)const ...)
(Const(8|16|32) [val]) => (MOVWconst [int32(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVWconst [0])
(ConstBool ...) -> (MOVWconst ...)
(ConstBool [b]) => (MOVWconst [b2i32(b)])
// truncations
// Because we ignore high parts of registers, truncates are just copies.
@ -243,10 +243,10 @@
(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) => (LessEqualU (CMP x y))
(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDconst [off] ptr)
(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
(Addr ...) -> (MOVWaddr ...)
(Addr {sym} base) => (MOVWaddr {sym} base)
(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
// loads
@ -433,30 +433,30 @@
(MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVDstore [off1-off2] {sym} ptr val mem)
(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
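In source terms (an illustrative sketch), the reload below is served from the register that was just stored: the byte load becomes a sign-extending register move (MOVBreg) instead of a second memory access:

    func storeLoad(p *int8, v int8) int8 {
    	*p = v
    	return *p // rewritten to a sign-extension of v, no load issued
    }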
@ -1470,6 +1470,6 @@
(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GE (TEQshiftRLreg x y z) yes no)
(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GE (TEQshiftRAreg x y z) yes no)
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read8(sym, off))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))])
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
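These let a load at a fixed offset from a read-only symbol be evaluated at build time via read8/read16/read32. A hypothetical illustration: indexing a string constant's read-only backing data with a fixed index may compile to a MOVWconst rather than a load:

    const hexDigits = "0123456789abcdef" // bytes live in read-only data

    func tenthDigit() byte {
    	return hexDigits[10] // may fold to the constant 'a'
    }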


@ -549,7 +549,9 @@
// Currently the updated value is not used, but we need a register to temporarily hold it.
(AtomicAnd8 ptr val mem) => (Select1 (LoweredAtomicAnd8 ptr val mem))
(AtomicAnd32 ptr val mem) => (Select1 (LoweredAtomicAnd32 ptr val mem))
(AtomicOr8 ptr val mem) => (Select1 (LoweredAtomicOr8 ptr val mem))
(AtomicOr32 ptr val mem) => (Select1 (LoweredAtomicOr32 ptr val mem))
(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
@ -859,88 +861,88 @@
(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVWUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(STP [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val1 val2 mem)
(STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
(MOVQstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// store zero
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
@ -1171,145 +1173,147 @@
(MUL x (MOVDconst [-1])) => (NEG x)
(MUL _ (MOVDconst [0])) => (MOVDconst [0])
(MUL x (MOVDconst [1])) => x
(MUL x (MOVDconst [c])) && isPowerOfTwo(c) => (SLLconst [log2(c)] x)
(MUL x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (ADDshiftLL x x [log2(c-1)])
(MUL x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MUL x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
(MUL x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (ADDshiftLL x x [log64(c-1)])
(MUL x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
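The identities behind these rules are easy to spot-check (a minimal sketch mirroring the SLLconst/ADDshiftLL combinations above):

    package main

    import "fmt"

    func main() {
    	x := int64(12345)
    	fmt.Println(x*8 == x<<3)           // isPowerOfTwo64(c): single shift
    	fmt.Println(x*9 == (x<<3)+x)       // isPowerOfTwo64(c-1): shift plus x
    	fmt.Println(x*7 == (x<<3)-x)       // isPowerOfTwo64(c+1): shift minus x
    	fmt.Println(x*24 == ((x<<1)+x)<<3) // c%3 == 0 with c/3 a power of two
    }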
(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x)
(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MULW x (MOVDconst [c])) && int32(c)==1 => x
(MULW x (MOVDconst [c])) && isPowerOfTwo(c) => (SLLconst [log2(c)] x)
(MULW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log2(c-1)])
(MULW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log64(c-1)])
(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
// mneg by constant
(MNEG x (MOVDconst [-1])) => x
(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
(MNEG x (MOVDconst [1])) => (NEG x)
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log2(c)] x))
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x
(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x)
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log2(c)] x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MADD a x (MOVDconst [-1])) => (SUB a x)
(MADD a _ (MOVDconst [0])) => a
(MADD a x (MOVDconst [1])) => (ADD a x)
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MADD a (MOVDconst [-1]) x) => (SUB a x)
(MADD a (MOVDconst [0]) _) => a
(MADD a (MOVDconst [1]) x) => (ADD a x)
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x)
(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a
(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x)
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x)
(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a
(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x)
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MSUB a x (MOVDconst [-1])) => (ADD a x)
(MSUB a _ (MOVDconst [0])) => a
(MSUB a x (MOVDconst [1])) => (SUB a x)
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MSUB a (MOVDconst [-1]) x) => (ADD a x)
(MSUB a (MOVDconst [0]) _) => a
(MSUB a (MOVDconst [1]) x) => (SUB a x)
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x)
(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a
(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x)
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x)
(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a
(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x)
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
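The fused multiply-add/sub forms apply the same shift identities beneath an accumulator; two spot checks corresponding to rules above (a sketch, true for any a and x):

    func maddIdentity(a, x int64) bool { return a+x*9 == a+((x<<3)+x) } // MADD, c=9: isPowerOfTwo64(c-1)
    func msubIdentity(a, x int64) bool { return a-x*16 == a-(x<<4) }    // MSUB, c=16: isPowerOfTwo64(c)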
// div by constant
(UDIV x (MOVDconst [1])) => x
(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) => (SRLconst [log2(c)] x)
(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x)
(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x
(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (SRLconst [log2(c)] x)
(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] x)
(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x)
(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (ANDconst [c-1] x)
(UMODW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (ANDconst [c-1] x)
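Unsigned division and remainder by a power of two reduce to a logical right shift and a mask with c-1, respectively (a spot check of the rules above):

    func udivIdentity(x uint64) bool { return x/8 == x>>3 } // UDIV by 2^3: shift
    func umodIdentity(x uint64) bool { return x%8 == x&7 }  // UMOD: mask with c-1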
// generic simplifications
(ADD x (NEG y)) => (SUB x y)


@ -656,12 +656,14 @@ func init() {
// atomic and/or.
// *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
// LDAXRB (Rarg0), Rout
// LDAXR (Rarg0), Rout
// AND/OR Rarg1, Rout
// STLXRB Rout, (Rarg0), Rtmp
// STLXR Rout, (Rarg0), Rtmp
// CBNZ Rtmp, -3(PC)
{name: "LoweredAtomicAnd8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
{name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
{name: "LoweredAtomicOr8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
{name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
// LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
// It saves all GP registers if necessary,


@ -143,7 +143,7 @@
(Const(32|16|8) [val]) => (MOVWconst [int32(val)])
(Const(32|64)F ...) => (MOV(F|D)const ...)
(ConstNil) => (MOVWconst [0])
(ConstBool [b]) => (MOVWconst [int32(b2i(b))])
(ConstBool [b]) => (MOVWconst [b2i32(b)])
// truncations
// Because we ignore high parts of registers, truncates are just copies.
@ -383,6 +383,9 @@
(ANDconst <typ.UInt32> [3]
(XORconst <typ.UInt32> [3] ptr)))))) mem)
(AtomicAnd32 ...) => (LoweredAtomicAnd ...)
(AtomicOr32 ...) => (LoweredAtomicOr ...)
// checks
(NilCheck ...) => (LoweredNilCheck ...)
@ -459,36 +462,36 @@
(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstorezero [off1+off2] {sym} ptr mem)
(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
(MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
@ -581,13 +584,13 @@
(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0])
(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG <x.Type> x)
(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
(MUL (MOVWconst [0]) _ ) => (MOVWconst [0])
(MUL (MOVWconst [1]) x ) => x
(MUL (MOVWconst [-1]) x ) => (NEG x)
(MUL (MOVWconst [c]) x ) && isPowerOfTwo(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
(MUL (MOVWconst [c]) x ) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
// generic simplifications
(ADD x (NEG y)) => (SUB x y)


@ -462,44 +462,44 @@
(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVFload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVDload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVFstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVDstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
// store zero
(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
@ -580,13 +580,13 @@
(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x)
(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0])
(Select1 (MULVU x (MOVVconst [1]))) => x
(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SLLVconst [log2(c)] x)
(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x)
// div by constant
(Select1 (DIVVU x (MOVVconst [1]))) => x
(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SRLVconst [log2(c)] x)
(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x)
(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod
(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (ANDconst [c-1] x) // mod
(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod
// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)


@ -150,6 +150,31 @@
(ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31])
(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])
// Combine rotate and mask operations
(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
// Note, any rotated word bitmask is still a valid word bitmask.
(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
// Merge shift right + shift left and clear left (e.g. for a table lookup)
(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
// The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF]
(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
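The table-lookup shape that comment refers to (an illustrative sketch): on ppc64 the right shift and the 0xFF mask below can collapse into a single RLWINM via the reductions above:

    func lookup(table *[256]byte, x uint32) byte {
    	return table[(x>>14)&0xFF] // one rlwinm computes the index
    }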
// large constant shifts
(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
@ -863,48 +888,48 @@
// is only one use.
(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// Fold offsets for loads.
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVSload [off1+int32(off2)] {sym} ptr mem)
@@ -954,16 +979,16 @@
// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
&& (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
(MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
&& (x.Op != OpSB || p.Uses == 1) =>
(MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
&& (x.Op != OpSB || p.Uses == 1) =>
(MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
&& (x.Op != OpSB || p.Uses == 1) =>
(MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
// atomic intrinsics
(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
@@ -981,7 +1006,9 @@
(AtomicCompareAndSwapRel32 ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
(AtomicAnd8 ...) => (LoweredAtomicAnd8 ...)
(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
(AtomicOr8 ...) => (LoweredAtomicOr8 ...)
(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])

View file

@@ -137,6 +137,7 @@ func init() {
gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
gp21a0 = regInfo{inputs: []regMask{gp, gp | sp | sb}, outputs: []regMask{gp}}
gp31 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
gp22 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
gp32 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
@@ -227,6 +228,10 @@ func init() {
{name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits
{name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"},
{name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
{name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
{name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros
{name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit)
@@ -602,25 +607,22 @@ func init() {
{name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
// atomic add32, 64
// SYNC
// LWSYNC
// LDAR (Rarg0), Rout
// ADD Rarg1, Rout
// STDCCC Rout, (Rarg0)
// BNE -3(PC)
// ISYNC
// return new sum
{name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
// atomic exchange32, 64
// SYNC
// LWSYNC
// LDAR (Rarg0), Rout
// STDCCC Rarg1, (Rarg0)
// BNE -2(PC)
// ISYNC
// return old val
{name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
@@ -643,15 +645,16 @@ func init() {
{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
// atomic 8 and/or.
// atomic 8/32 and/or.
// *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.
// LBAR (Rarg0), Rtmp
// LBAR/LWAT (Rarg0), Rtmp
// AND/OR Rarg1, Rtmp
// STBCCC Rtmp, (Rarg0), Rtmp
// STBCCC/STWCCC Rtmp, (Rarg0), Rtmp
// BNE Rtmp, -3(PC)
{name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
// LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
// It preserves R0 through R17 (except special registers R1, R2, R11, R12, R13), g, and its arguments R20 and R21,

View file

@@ -3,17 +3,12 @@
// license that can be found in the LICENSE file.
// Optimizations TODO:
// * Somehow track when values are already zero/signed-extended, avoid re-extending.
// * Use SLTI and SLTIU for comparisons to constants, instead of SLT/SLTU with constants in registers
// * Find a more efficient way to do zero/sign extension than left+right shift.
// There are many other options (store then load-extend, LUI+ANDI for zero extend, special case 32->64, ...),
// but left+right shift is simple and uniform, and we don't have real hardware to do perf testing on anyway.
// * Use the zero register instead of moving 0 into a register.
// * Add rules to avoid generating a temp bool value for (If (SLT[U] ...) ...).
// * Optimize left and right shift by simplifying SLTIU, Neg, and ADD for constants.
// * Arrange for non-trivial Zero and Move lowerings to use aligned loads and stores.
// * Eliminate zero immediate shifts, adds, etc.
// * Use a Duff's device for some moves and zeros.
// * Avoid using Neq32 for writeBarrier.enabled checks.
// Lowering arithmetic
@@ -98,25 +93,21 @@
(Sqrt ...) => (FSQRTD ...)
// Zero and sign extension
// Shift left until the bits we want are at the top of the register.
// Then logical/arithmetic shift right for zero/sign extend.
// We always extend to 64 bits; there's no reason not to,
// and optimization rules can then collapse some extensions.
// Sign and zero extension.
(SignExt8to16 <t> x) => (SRAI [56] (SLLI <t> [56] x))
(SignExt8to32 <t> x) => (SRAI [56] (SLLI <t> [56] x))
(SignExt8to64 <t> x) => (SRAI [56] (SLLI <t> [56] x))
(SignExt16to32 <t> x) => (SRAI [48] (SLLI <t> [48] x))
(SignExt16to64 <t> x) => (SRAI [48] (SLLI <t> [48] x))
(SignExt32to64 <t> x) => (ADDIW [0] x)
(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)
(ZeroExt8to16 <t> x) => (SRLI [56] (SLLI <t> [56] x))
(ZeroExt8to32 <t> x) => (SRLI [56] (SLLI <t> [56] x))
(ZeroExt8to64 <t> x) => (SRLI [56] (SLLI <t> [56] x))
(ZeroExt16to32 <t> x) => (SRLI [48] (SLLI <t> [48] x))
(ZeroExt16to64 <t> x) => (SRLI [48] (SLLI <t> [48] x))
(ZeroExt32to64 <t> x) => (SRLI [32] (SLLI <t> [32] x))
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)
(Cvt32to32F ...) => (FCVTSW ...)
(Cvt32to64F ...) => (FCVTDW ...)
@@ -261,16 +252,16 @@
(EqPtr x y) => (SEQZ (SUB <x.Type> x y))
(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
(Eq32 x y) => (SEQZ (SUBW <x.Type> x y))
(Eq16 x y) => (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
(Eq8 x y) => (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq64F ...) => (FEQD ...)
(Eq32F ...) => (FEQS ...)
(NeqPtr x y) => (SNEZ (SUB <x.Type> x y))
(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
(Neq32 x y) => (SNEZ (SUBW <x.Type> x y))
(Neq16 x y) => (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
(Neq8 x y) => (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
(Neq16 x y) => (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Neq8 x y) => (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Neq64F ...) => (FNED ...)
(Neq32F ...) => (FNES ...)
@@ -297,36 +288,36 @@
// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
// knows what variables are being read/written by the ops.
(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVWUload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
(MOVBUload [off1+int32(off2)] {sym} base mem)
@@ -360,13 +351,66 @@
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
// Zeroing
// TODO: more optimized zeroing, including attempting to use aligned accesses.
// Small zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVBconst) mem)
(Zero [2] ptr mem) => (MOVHstore ptr (MOVHconst) mem)
(Zero [4] ptr mem) => (MOVWstore ptr (MOVWconst) mem)
(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst) mem)
(Zero [1] ptr mem) => (MOVBstore ptr (MOVBconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore ptr (MOVHconst [0]) mem)
(Zero [2] ptr mem) =>
(MOVBstore [1] ptr (MOVBconst [0])
(MOVBstore ptr (MOVBconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore ptr (MOVWconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [2] ptr (MOVHconst [0])
(MOVHstore ptr (MOVHconst [0]) mem))
(Zero [4] ptr mem) =>
(MOVBstore [3] ptr (MOVBconst [0])
(MOVBstore [2] ptr (MOVBconst [0])
(MOVBstore [1] ptr (MOVBconst [0])
(MOVBstore ptr (MOVBconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVDstore ptr (MOVDconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore ptr (MOVWconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [6] ptr (MOVHconst [0])
(MOVHstore [4] ptr (MOVHconst [0])
(MOVHstore [2] ptr (MOVHconst [0])
(MOVHstore ptr (MOVHconst [0]) mem))))
(Zero [3] ptr mem) =>
(MOVBstore [2] ptr (MOVBconst [0])
(MOVBstore [1] ptr (MOVBconst [0])
(MOVBstore ptr (MOVBconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
(MOVHstore [4] ptr (MOVHconst [0])
(MOVHstore [2] ptr (MOVHconst [0])
(MOVHstore ptr (MOVHconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
(MOVWstore [8] ptr (MOVWconst [0])
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore ptr (MOVWconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVDstore [8] ptr (MOVDconst [0])
(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVDstore [16] ptr (MOVDconst [0])
(MOVDstore [8] ptr (MOVDconst [0])
(MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
(MOVDstore [24] ptr (MOVDconst [0])
(MOVDstore [16] ptr (MOVDconst [0])
(MOVDstore [8] ptr (MOVDconst [0])
(MOVDstore ptr (MOVDconst [0]) mem))))
// Medium 8-aligned zeroing uses a Duff's device
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%8 == 0 && s <= 8*128
&& t.Alignment()%8 == 0 && !config.noDuffDevice =>
(DUFFZERO [8 * (128 - s/8)] ptr mem)
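A note on the auxint: assuming each of the 128 zeroing elements in the riscv64 duffzero body is 8 bytes of machine code (two 4-byte instructions; see runtime/mkduff.go), zeroing s bytes must enter the body s/8 elements before its end:

	// duffZeroOff computes the DUFFZERO auxint used above; e.g. s=16 gives
	// 8*(128-2) = 1008, entering just before the last two elements.
	func duffZeroOff(s int64) int64 { return 8 * (128 - s/8) }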
// Generic zeroing uses a loop
(Zero [s] {t} ptr mem) =>
@@ -378,7 +422,7 @@
(Convert ...) => (MOVconvert ...)
// Checks
(IsNonNil p) => (NeqPtr (MOVDconst) p)
(IsNonNil p) => (NeqPtr (MOVDconst [0]) p)
(IsInBounds ...) => (Less64U ...)
(IsSliceInBounds ...) => (Leq64U ...)
@@ -395,13 +439,66 @@
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
// Moves
// TODO: more optimized moves, including attempting to use aligned accesses.
// Small moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHload src mem) mem)
(Move [4] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
(MOVBstore [3] dst (MOVBload [3] src mem)
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
(MOVDstore dst (MOVDload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [6] dst (MOVHload [6] src mem)
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))))
(Move [3] dst src mem) =>
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
(MOVDstore [8] dst (MOVDload [8] src mem)
(MOVDstore dst (MOVDload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
(MOVDstore [16] dst (MOVDload [16] src mem)
(MOVDstore [8] dst (MOVDload [8] src mem)
(MOVDstore dst (MOVDload src mem) mem)))
(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
(MOVDstore [24] dst (MOVDload [24] src mem)
(MOVDstore [16] dst (MOVDload [16] src mem)
(MOVDstore [8] dst (MOVDload [8] src mem)
(MOVDstore dst (MOVDload src mem) mem))))
// Medium 8-aligned move uses a Duff's device
// 16 and 128 are magic constants, see runtime/mkduff.go
(Move [s] {t} dst src mem)
&& s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
&& !config.noDuffDevice && logLargeCopy(v, s) =>
(DUFFCOPY [16 * (128 - s/8)] dst src mem)
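Same scheme as DUFFZERO, except each copy element is assumed to be 16 bytes of code (a load, a store, and two pointer increments; see runtime/mkduff.go), hence the factor of 16:

	// duffCopyOff computes the DUFFCOPY auxint used above; e.g. s=16 gives
	// 16*(128-2) = 2016.
	func duffCopyOff(s int64) int64 { return 16 * (128 - s/8) }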
// Generic move uses a loop
(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
@@ -424,6 +521,8 @@
(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
// TODO(jsing): Check if we actually need MOV{B,H,W}const as most platforms
// use a single MOVDconst op.
(Const8 ...) => (MOVBconst ...)
(Const16 ...) => (MOVHconst ...)
(Const32 ...) => (MOVWconst ...)
@@ -501,6 +600,79 @@
(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
// Avoid sign/zero extension for consts.
(MOVBreg (MOVBconst [c])) => (MOVDconst [int64(c)])
(MOVHreg (MOVBconst [c])) => (MOVDconst [int64(c)])
(MOVHreg (MOVHconst [c])) => (MOVDconst [int64(c)])
(MOVWreg (MOVBconst [c])) => (MOVDconst [int64(c)])
(MOVWreg (MOVHconst [c])) => (MOVDconst [int64(c)])
(MOVWreg (MOVWconst [c])) => (MOVDconst [int64(c)])
(MOVBUreg (MOVBconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHUreg (MOVBconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVHUreg (MOVHconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWUreg (MOVBconst [c])) => (MOVDconst [int64(uint32(c))])
(MOVWUreg (MOVHconst [c])) => (MOVDconst [int64(uint32(c))])
(MOVWUreg (MOVWconst [c])) => (MOVDconst [int64(uint32(c))])
// Avoid sign/zero extension after properly typed load.
(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
// Fold double extensions.
(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
// Do not extend before store.
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
// Replace extend after load with alternate load where possible.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVnop does not emit an instruction; it exists only to ensure the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
// Fold constant into immediate instructions where possible.
(ADD (MOVBconst [val]) x) => (ADDI [int64(val)] x)
(ADD (MOVHconst [val]) x) => (ADDI [int64(val)] x)

View file

@@ -193,6 +193,17 @@
{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
{name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
// Conversions
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
{name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
{name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
{name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOV"}, // move from arg0
{name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
{name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
{name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
{name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register
// Shift ops
{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63)
{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed
@@ -229,6 +240,44 @@ func init() {
{name: "CALLclosure", argLength: 3, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// duffzero
// arg0 = address of memory to zero (in X10, changed as side effect)
// arg1 = mem
// auxint = offset into duffzero code to start executing
// X1 (link register) changed because of function call
// returns mem
{
name: "DUFFZERO",
aux: "Int64",
argLength: 2,
reg: regInfo{
inputs: []regMask{regNamed["X10"]},
clobbers: regNamed["X1"] | regNamed["X10"],
},
typ: "Mem",
faultOnNilArg0: true,
},
// duffcopy
// arg0 = address of dst memory (in X11, changed as side effect)
// arg1 = address of src memory (in X10, changed as side effect)
// arg2 = mem
// auxint = offset into duffcopy code to start executing
// X1 (link register) changed because of function call
// returns mem
{
name: "DUFFCOPY",
aux: "Int64",
argLength: 3,
reg: regInfo{
inputs: []regMask{regNamed["X11"], regNamed["X10"]},
clobbers: regNamed["X1"] | regNamed["X10"] | regNamed["X11"],
},
typ: "Mem",
faultOnNilArg0: true,
faultOnNilArg1: true,
},
// Generic moves and zeros
// general unaligned zeroing

View file

@@ -198,6 +198,9 @@
(RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
mem)
(AtomicAnd32 ...) => (LAN ...)
(AtomicOr32 ...) => (LAO ...)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) ...) => (MOVBreg ...)
@@ -871,67 +874,67 @@
// loads/stores using PC-relative addressing directly must be aligned to the
// size of the target.
(MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
(MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
(MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
(FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
// Cannot store constant to SB directly (no 'move relative long immediate' instructions).
(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVDstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVHstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
(MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
// MOVDaddr into MOVDaddridx
(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
(MOVDaddridx [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB =>
(MOVDaddridx [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
// Absorb InvertFlags into branches.
(BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no)

View file

@@ -547,8 +547,10 @@ func init() {
// Atomic bitwise operations.
// Note: 'floor' operations round the pointer down to the nearest word boundary
// which reflects how they are used in the runtime.
{name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem.
{name: "LAN", argLength: 3, reg: gpstore, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 &= arg1. arg2 = mem.
{name: "LANfloor", argLength: 3, reg: gpstorelab, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) &= arg1. arg2 = mem.
{name: "LAO", argLength: 3, reg: gpstore, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 |= arg1. arg2 = mem.
{name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem.
// Compare and swap.
// arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.

View file

@@ -9,7 +9,6 @@
(Int64Hi (Int64Make hi _)) => hi
(Int64Lo (Int64Make _ lo)) => lo
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() =>
(Int64Make
(Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem)
@@ -42,20 +41,21 @@
lo
(Store {hi.Type} dst hi mem))
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() =>
// These are not enabled during decomposeBuiltin when late call expansion is in use, but they are always enabled for softFloat
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
(Int64Make
(Arg <typ.Int32> {n} [off+4])
(Arg <typ.UInt32> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() =>
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
(Int64Make
(Arg <typ.UInt32> {n} [off+4])
(Arg <typ.UInt32> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() =>
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
(Int64Make
(Arg <typ.Int32> {n} [off])
(Arg <typ.UInt32> {n} [off+4]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() =>
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
(Int64Make
(Arg <typ.UInt32> {n} [off])
(Arg <typ.UInt32> {n} [off+4]))
@@ -143,6 +143,10 @@
(Trunc64to32 (Int64Make _ lo)) => lo
(Trunc64to16 (Int64Make _ lo)) => (Trunc32to16 lo)
(Trunc64to8 (Int64Make _ lo)) => (Trunc32to8 lo)
// Most general
(Trunc64to32 x) => (Int64Lo x)
(Trunc64to16 x) => (Trunc32to16 (Int64Lo x))
(Trunc64to8 x) => (Trunc32to8 (Int64Lo x))
(Lsh32x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
(Rsh32x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask x)
@@ -199,132 +203,150 @@
(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
(Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
// Most general
(Lsh64x64 x y) => (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh64x64 x y) => (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh64Ux64 x y) => (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Lsh32x64 x y) => (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh32x64 x y) => (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh32Ux64 x y) => (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Lsh16x64 x y) => (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh16x64 x y) => (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh16Ux64 x y) => (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Lsh8x64 x y) => (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh8x64 x y) => (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
// Clean up constants a little
(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c == 0 => y
(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c != 0 => (Const32 <typ.UInt32> [-1])
// 64x left shift
// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result in 0
// result.lo = lo<<s
(Lsh64x32 (Int64Make hi lo) s) =>
(Lsh64x32 x s) =>
(Int64Make
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Lsh32x32 <typ.UInt32> hi s)
(Lsh32x32 <typ.UInt32> (Int64Hi x) s)
(Rsh32Ux32 <typ.UInt32>
lo
(Int64Lo x)
(Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
(Lsh32x32 <typ.UInt32>
lo
(Int64Lo x)
(Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))))
(Lsh32x32 <typ.UInt32> lo s))
(Lsh64x16 (Int64Make hi lo) s) =>
(Lsh32x32 <typ.UInt32> (Int64Lo x) s))
(Lsh64x16 x s) =>
(Int64Make
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Lsh32x16 <typ.UInt32> hi s)
(Lsh32x16 <typ.UInt32> (Int64Hi x) s)
(Rsh32Ux16 <typ.UInt32>
lo
(Int64Lo x)
(Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
(Lsh32x16 <typ.UInt32>
lo
(Int64Lo x)
(Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))))
(Lsh32x16 <typ.UInt32> lo s))
(Lsh64x8 (Int64Make hi lo) s) =>
(Lsh32x16 <typ.UInt32> (Int64Lo x) s))
(Lsh64x8 x s) =>
(Int64Make
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Lsh32x8 <typ.UInt32> hi s)
(Lsh32x8 <typ.UInt32> (Int64Hi x) s)
(Rsh32Ux8 <typ.UInt32>
lo
(Int64Lo x)
(Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
(Lsh32x8 <typ.UInt32>
lo
(Int64Lo x)
(Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))))
(Lsh32x8 <typ.UInt32> lo s))
(Lsh32x8 <typ.UInt32> (Int64Lo x) s))
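A Go sketch of the left-shift decomposition described in the comment above. Go shifts already yield 0 for counts at or beyond the operand width, matching the rules' convention; names are illustrative:

	// shl64via32 builds a 64-bit left shift from 32-bit halves:
	// rhi = hi<<s | lo>>(32-s) | lo<<(s-32), rlo = lo<<s.
	func shl64via32(hi, lo, s uint32) (rhi, rlo uint32) {
		rhi = hi<<s | lo>>(32-s) | lo<<(s-32)
		rlo = lo << s
		return
	}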
// 64x unsigned right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result in 0
(Rsh64Ux32 (Int64Make hi lo) s) =>
(Rsh64Ux32 x s) =>
(Int64Make
(Rsh32Ux32 <typ.UInt32> hi s)
(Rsh32Ux32 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Rsh32Ux32 <typ.UInt32> lo s)
(Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
(Lsh32x32 <typ.UInt32>
hi
(Int64Hi x)
(Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
(Rsh32Ux32 <typ.UInt32>
hi
(Int64Hi x)
(Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
(Rsh64Ux16 (Int64Make hi lo) s) =>
(Rsh64Ux16 x s) =>
(Int64Make
(Rsh32Ux16 <typ.UInt32> hi s)
(Rsh32Ux16 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Rsh32Ux16 <typ.UInt32> lo s)
(Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
(Lsh32x16 <typ.UInt32>
hi
(Int64Hi x)
(Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
(Rsh32Ux16 <typ.UInt32>
hi
(Int64Hi x)
(Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
(Rsh64Ux8 (Int64Make hi lo) s) =>
(Rsh64Ux8 x s) =>
(Int64Make
(Rsh32Ux8 <typ.UInt32> hi s)
(Rsh32Ux8 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Rsh32Ux8 <typ.UInt32> lo s)
(Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
(Lsh32x8 <typ.UInt32>
hi
(Int64Hi x)
(Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
(Rsh32Ux8 <typ.UInt32>
hi
(Int64Hi x)
(Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
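The unsigned right shift mirrors the sketch above, again relying on zero results for oversized shift counts:

	// shr64via32: rhi = hi>>s, rlo = lo>>s | hi<<(32-s) | hi>>(s-32).
	func shr64via32(hi, lo, s uint32) (rhi, rlo uint32) {
		rhi = hi >> s
		rlo = lo>>s | hi<<(32-s) | hi>>(s-32)
		return
	}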
// 64x signed right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result in 0/-1
(Rsh64x32 (Int64Make hi lo) s) =>
(Rsh64x32 x s) =>
(Int64Make
(Rsh32x32 <typ.UInt32> hi s)
(Rsh32x32 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Rsh32Ux32 <typ.UInt32> lo s)
(Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
(Lsh32x32 <typ.UInt32>
hi
(Int64Hi x)
(Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
(And32 <typ.UInt32>
(Rsh32x32 <typ.UInt32>
hi
(Int64Hi x)
(Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))
(Zeromask
(Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
(Rsh64x16 (Int64Make hi lo) s) =>
(Rsh64x16 x s) =>
(Int64Make
(Rsh32x16 <typ.UInt32> hi s)
(Rsh32x16 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Rsh32Ux16 <typ.UInt32> lo s)
(Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
(Lsh32x16 <typ.UInt32>
hi
(Int64Hi x)
(Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
(And32 <typ.UInt32>
(Rsh32x16 <typ.UInt32>
hi
(Int64Hi x)
(Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))
(Zeromask
(ZeroExt16to32
(Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
(Rsh64x8 (Int64Make hi lo) s) =>
(Rsh64x8 x s) =>
(Int64Make
(Rsh32x8 <typ.UInt32> hi s)
(Rsh32x8 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
(Rsh32Ux8 <typ.UInt32> lo s)
(Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
(Lsh32x8 <typ.UInt32>
hi
(Int64Hi x)
(Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
(And32 <typ.UInt32>
(Rsh32x8 <typ.UInt32>
hi
(Int64Hi x)
(Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))
(Zeromask
(ZeroExt8to32

View file

@@ -1040,6 +1040,46 @@
(ZeroExt32to64 x)))
(Const64 <typ.UInt64> [32+umagic32(c).s-1])))
// For unsigned 64-bit divides on 32-bit machines,
// if the constant fits in 16 bits (so that the last term
// fits in 32 bits), convert to three 32-bit divides by a constant.
//
// If 1<<32 = Q * c + R
// and x = hi << 32 + lo
//
// Then x = (hi/c*c + hi%c) << 32 + lo
// = hi/c*c<<32 + hi%c<<32 + lo
// = hi/c*c<<32 + (hi%c)*(Q*c+R) + lo/c*c + lo%c
// = hi/c*c<<32 + (hi%c)*Q*c + lo/c*c + (hi%c*R+lo%c)
// and x / c = (hi/c)<<32 + (hi%c)*Q + lo/c + (hi%c*R+lo%c)/c
(Div64u x (Const64 [c])) && c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul =>
(Add64
(Add64 <typ.UInt64>
(Add64 <typ.UInt64>
(Lsh64x64 <typ.UInt64>
(ZeroExt32to64
(Div32u <typ.UInt32>
(Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
(Const32 <typ.UInt32> [int32(c)])))
(Const64 <typ.UInt64> [32]))
(ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))))
(Mul64 <typ.UInt64>
(ZeroExt32to64 <typ.UInt64>
(Mod32u <typ.UInt32>
(Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
(Const32 <typ.UInt32> [int32(c)])))
(Const64 <typ.UInt64> [int64((1<<32)/c)])))
(ZeroExt32to64
(Div32u <typ.UInt32>
(Add32 <typ.UInt32>
(Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))
(Mul32 <typ.UInt32>
(Mod32u <typ.UInt32>
(Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
(Const32 <typ.UInt32> [int32(c)]))
(Const32 <typ.UInt32> [int32((1<<32)%c)])))
(Const32 <typ.UInt32> [int32(c)]))))
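The derivation in the comment can be spot-checked numerically. A minimal sketch, assuming only that c stays small (the rule requires c <= 0xFFFF, which keeps the remainder term in range):

	package main

	import "fmt"

	// check verifies x/c == (hi/c)<<32 + (hi%c)*Q + lo/c + ((hi%c)*R+lo%c)/c
	// with Q = (1<<32)/c and R = (1<<32)%c, as derived above.
	func check(x, c uint64) bool {
		hi, lo := x>>32, x&0xFFFFFFFF
		q, r := (uint64(1)<<32)/c, (uint64(1)<<32)%c
		got := (hi/c)<<32 + (hi%c)*q + lo/c + ((hi%c)*r+lo%c)/c
		return got == x/c
	}

	func main() {
		fmt.Println(check(0x123456789ABCDEF0, 10007)) // true
	}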
// For 64-bit divides on 64-bit machines
// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul =>
@@ -1961,7 +2001,12 @@
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
=> (Invalid)
// for late-expanded calls
// for rewriting results of some late-expanded rewrites (below)
(SelectN [0] (MakeResult a ___)) => a
(SelectN [1] (MakeResult a b ___)) => b
(SelectN [2] (MakeResult a b c ___)) => c
// for late-expanded calls, recognize newobject and remove zeroing and nilchecks
(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
&& isSameCall(call.Aux, "runtime.newobject")
=> mem
@@ -1986,6 +2031,13 @@
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
=> (Invalid)
// for late-expanded calls, recognize memequal applied to a single constant byte
// TODO: figure out the break-even number of bytes for this optimization.
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
&& isSameCall(callAux, "runtime.memequal")
&& symIsRO(scon)
=> (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
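For context, the canonical source pattern producing this call shape is a comparison against a one-byte constant in read-only data, roughly:

	// After walk, the comparison below becomes a length check plus
	// runtime.memequal(p, q, 1) with q a read-only one-byte symbol; the
	// rule above folds that call into a byte load and Eq8.
	func isDot(s string) bool { return s == "." }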
// Evaluate constant address comparisons.
(EqPtr x x) => (ConstBool [true])
(NeqPtr x x) => (ConstBool [false])

View file

@@ -538,8 +538,9 @@ var genericOps = []opData{
// pseudo-ops for breaking Tuple
{name: "Select0", argLength: 1, zeroWidth: true}, // the first component of a tuple
{name: "Select1", argLength: 1, zeroWidth: true}, // the second component of a tuple
{name: "SelectN", argLength: 1, aux: "Int64"}, // arg0=tuple, auxint=field index. Returns the auxint'th member.
{name: "SelectNAddr", argLength: 1, aux: "Int64"}, // arg0=tuple, auxint=field index. Returns the address of auxint'th member. Used for un-SSA-able result types.
{name: "SelectN", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the auxint'th member.
{name: "SelectNAddr", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the address of auxint'th member. Used for un-SSA-able result types.
{name: "MakeResult", argLength: -1}, // arg0 .. are components of a "Result" (like the result from a Call). The last arg should be memory (like the result from a call).
// Atomic operations used for semantically inlining sync/atomic and
// runtime/internal/atomic. Atomic loads return a new memory so that
@@ -565,7 +566,9 @@ var genericOps = []opData{
{name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
{name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, reports whether store happens and new memory.
{name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
{name: "AtomicAnd32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
{name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
{name: "AtomicOr32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
// Atomic operation variants
// These variants have the same semantics as the atomic operations above.

View file

@@ -35,8 +35,7 @@ import (
)
// rule syntax:
// sexpr [&& extra conditions] -> [@block] sexpr (untyped)
// sexpr [&& extra conditions] => [@block] sexpr (typed)
// sexpr [&& extra conditions] => [@block] sexpr
//
// sexpr are s-expressions (lisp-like parenthesized groupings)
// sexpr ::= [variable:](opcode sexpr*)
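// For example, the rule (ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
// matches a nested sexpr, guards on the condition after &&, and rewrites to the sexpr after =>.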
@@ -79,14 +78,8 @@ func normalizeSpaces(s string) string {
}
// parse returns the matching part of the rule, additional conditions, and the result.
// parse also reports whether the generated code should use strongly typed aux and auxint fields.
func (r Rule) parse() (match, cond, result string, typed bool) {
arrow := "->"
if strings.Contains(r.Rule, "=>") {
arrow = "=>"
typed = true
}
s := strings.Split(r.Rule, arrow)
func (r Rule) parse() (match, cond, result string) {
s := strings.Split(r.Rule, "=>")
match = normalizeSpaces(s[0])
result = normalizeSpaces(s[1])
cond = ""
@@ -94,7 +87,7 @@ func (r Rule) parse() (match, cond, result string, typed bool) {
cond = normalizeSpaces(match[i+2:])
match = normalizeSpaces(match[:i])
}
return match, cond, result, typed
return match, cond, result
}
func genRules(arch arch) { genRulesSuffix(arch, "") }
@@ -120,7 +113,7 @@ func genRulesSuffix(arch arch, suff string) {
scanner := bufio.NewScanner(text)
rule := ""
var lineno int
var ruleLineno int // line number of "->" or "=>"
var ruleLineno int // line number of "=>"
for scanner.Scan() {
lineno++
line := scanner.Text()
@@ -134,13 +127,13 @@ func genRulesSuffix(arch arch, suff string) {
if rule == "" {
continue
}
if !strings.Contains(rule, "->") && !strings.Contains(rule, "=>") {
if !strings.Contains(rule, "=>") {
continue
}
if ruleLineno == 0 {
ruleLineno = lineno
}
if strings.HasSuffix(rule, "->") || strings.HasSuffix(rule, "=>") {
if strings.HasSuffix(rule, "=>") {
continue // continue on the next line
}
if n := balance(rule); n > 0 {
@@ -157,7 +150,7 @@ func genRulesSuffix(arch arch, suff string) {
continue
}
// Do fancier value op matching.
match, _, _, _ := r.parse()
match, _, _ := r.parse()
op, oparch, _, _, _, _ := parseValue(match, arch, loc)
opname := fmt.Sprintf("Op%s%s", oparch, op.name)
oprules[opname] = append(oprules[opname], r)
@@ -231,7 +224,7 @@ func genRulesSuffix(arch arch, suff string) {
log.Fatalf("unconditional rule %s is followed by other rules", rr.Match)
}
rr = &RuleRewrite{Loc: rule.Loc}
rr.Match, rr.Cond, rr.Result, rr.Typed = rule.parse()
rr.Match, rr.Cond, rr.Result = rule.parse()
pos, _ := genMatch(rr, arch, rr.Match, fn.ArgLen >= 0)
if pos == "" {
pos = "v.Pos"
@@ -790,7 +783,6 @@ type (
Alloc int // for unique var names
Loc string // file name & line number of the original rule
CommuteDepth int // used to track depth of commute loops
Typed bool // aux and auxint fields should be strongly typed
}
Declare struct {
Name string
@@ -844,7 +836,7 @@ func breakf(format string, a ...interface{}) *CondBreak {
func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
rr := &RuleRewrite{Loc: rule.Loc}
rr.Match, rr.Cond, rr.Result, rr.Typed = rule.parse()
rr.Match, rr.Cond, rr.Result = rule.parse()
_, _, auxint, aux, s := extract(rr.Match) // remove parens, then split
// check match of control values
@@ -888,15 +880,6 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
if e.name == "" {
continue
}
if !rr.Typed {
if !token.IsIdentifier(e.name) || rr.declared(e.name) {
// code or variable
rr.add(breakf("b.%s != %s", e.field, e.name))
} else {
rr.add(declf(e.name, "b.%s", e.field))
}
continue
}
if e.dclType == "" {
log.Fatalf("op %s has no declared type for %s", data.name, e.field)
@@ -965,20 +948,12 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
}
if auxint != "" {
if rr.Typed {
// Make sure auxint value has the right type.
rr.add(stmtf("b.AuxInt = %sToAuxInt(%s)", unTitle(outdata.auxIntType()), auxint))
} else {
rr.add(stmtf("b.AuxInt = %s", auxint))
}
}
if aux != "" {
if rr.Typed {
// Make sure aux value has the right type.
rr.add(stmtf("b.Aux = %sToAux(%s)", unTitle(outdata.auxType()), aux))
} else {
rr.add(stmtf("b.Aux = %s", aux))
}
}
succChanged := false
@@ -1046,15 +1021,6 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
if e.name == "" {
continue
}
if !rr.Typed {
if !token.IsIdentifier(e.name) || rr.declared(e.name) {
// code or variable
rr.add(breakf("%s.%s != %s", v, e.field, e.name))
} else {
rr.add(declf(e.name, "%s.%s", v, e.field))
}
continue
}
if e.dclType == "" {
log.Fatalf("op %s has no declared type for %s", op.name, e.field)
@@ -1244,20 +1210,12 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
}
if auxint != "" {
if rr.Typed {
// Make sure auxint value has the right type.
rr.add(stmtf("%s.AuxInt = %sToAuxInt(%s)", v, unTitle(op.auxIntType()), auxint))
} else {
rr.add(stmtf("%s.AuxInt = %s", v, auxint))
}
}
if aux != "" {
if rr.Typed {
// Make sure aux value has the right type.
rr.add(stmtf("%s.Aux = %sToAux(%s)", v, unTitle(op.auxType()), aux))
} else {
rr.add(stmtf("%s.Aux = %s", v, aux))
}
}
all := new(strings.Builder)
for i, arg := range args {
@ -1538,7 +1496,7 @@ func excludeFromExpansion(s string, idx []int) bool {
return true
}
right := s[idx[1]:]
if strings.Contains(left, "&&") && (strings.Contains(right, "->") || strings.Contains(right, "=>")) {
if strings.Contains(left, "&&") && strings.Contains(right, "=>") {
// Inside && conditions.
return true
}
@ -1640,7 +1598,6 @@ func normalizeWhitespace(x string) string {
x = strings.Replace(x, " )", ")", -1)
x = strings.Replace(x, "[ ", "[", -1)
x = strings.Replace(x, " ]", "]", -1)
x = strings.Replace(x, ")->", ") ->", -1)
x = strings.Replace(x, ")=>", ") =>", -1)
return x
}
@ -1697,7 +1654,7 @@ func parseEllipsisRules(rules []Rule, arch arch) (newop string, ok bool) {
return "", false
}
rule := rules[0]
match, cond, result, _ := rule.parse()
match, cond, result := rule.parse()
if cond != "" || !isEllipsisValue(match) || !isEllipsisValue(result) {
if strings.Contains(rule.Rule, "...") {
log.Fatalf("%s: found ellipsis in non-ellipsis rule", rule.Loc)
@ -1722,7 +1679,7 @@ func isEllipsisValue(s string) bool {
}
func checkEllipsisRuleCandidate(rule Rule, arch arch) {
match, cond, result, _ := rule.parse()
match, cond, result := rule.parse()
if cond != "" {
return
}
@ -1732,7 +1689,7 @@ func checkEllipsisRuleCandidate(rule Rule, arch arch) {
var usingCopy string
var eop opData
if result[0] != '(' {
// Check for (Foo x) -> x, which can be converted to (Foo ...) -> (Copy ...).
// Check for (Foo x) => x, which can be converted to (Foo ...) => (Copy ...).
args2 = []string{result}
usingCopy = " using Copy"
} else {

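Aside on the surviving syntax, since the hunks above delete the legacy arrow: every rule is now written in the strongly typed form, e.g. (a representative rule, assumed for illustration rather than quoted from a specific .rules file):
(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
The bracketed auxints carry real Go types (int32 here), which is why the generated rewrite files further below convert through auxIntToInt32/int32ToAuxInt rather than assigning v.AuxInt directly.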
View file

@ -113,7 +113,7 @@ func (lca *lcaRange) find(a, b *Block) *Block {
// on the tour from p1 to p2. We've precomputed minimum
// depth blocks for powers-of-two subsequences of the tour.
// Combine the right two precomputed values to get the answer.
logS := uint(log2(int64(p2 - p1)))
logS := uint(log64(int64(p2 - p1)))
bid1 := lca.rangeMin[logS][p1]
bid2 := lca.rangeMin[logS][p2-1<<logS+1]
if lca.blocks[bid1].depth < lca.blocks[bid2].depth {

View file

@ -266,9 +266,6 @@ func (x ValAndOff) Val8() int8 { return int8(int64(x) >> 32) }
func (x ValAndOff) Off() int64 { return int64(int32(x)) }
func (x ValAndOff) Off32() int32 { return int32(x) }
func (x ValAndOff) Int64() int64 {
return int64(x)
}
func (x ValAndOff) String() string {
return fmt.Sprintf("val=%d,off=%d", x.Val(), x.Off())
}
@ -297,17 +294,9 @@ func validValAndOff(val, off int64) bool {
return true
}
// makeValAndOff encodes a ValAndOff into an int64 suitable for storing in an AuxInt field.
func makeValAndOff(val, off int64) int64 {
if !validValAndOff(val, off) {
panic("invalid makeValAndOff")
}
return ValAndOff(val<<32 + int64(uint32(off))).Int64()
}
func makeValAndOff32(val, off int32) ValAndOff {
return ValAndOff(int64(val)<<32 + int64(uint32(off)))
}
func makeValAndOff64(val, off int64) ValAndOff {
if !validValAndOff(val, off) {
panic("invalid makeValAndOff64")
@ -315,35 +304,26 @@ func makeValAndOff64(val, off int64) ValAndOff {
return ValAndOff(val<<32 + int64(uint32(off)))
}
func (x ValAndOff) canAdd(off int64) bool {
newoff := x.Off() + off
return newoff == int64(int32(newoff))
}
func (x ValAndOff) canAdd32(off int32) bool {
newoff := x.Off() + int64(off)
return newoff == int64(int32(newoff))
}
func (x ValAndOff) add(off int64) int64 {
if !x.canAdd(off) {
panic("invalid ValAndOff.add")
}
return makeValAndOff(x.Val(), x.Off()+off)
func (x ValAndOff) canAdd64(off int64) bool {
newoff := x.Off() + off
return newoff == int64(int32(newoff))
}
func (x ValAndOff) addOffset32(off int32) ValAndOff {
if !x.canAdd32(off) {
panic("invalid ValAndOff.add")
panic("invalid ValAndOff.addOffset32")
}
return ValAndOff(makeValAndOff(x.Val(), x.Off()+int64(off)))
return makeValAndOff64(x.Val(), x.Off()+int64(off))
}
func (x ValAndOff) addOffset64(off int64) ValAndOff {
if !x.canAdd(off) {
panic("invalid ValAndOff.add")
if !x.canAdd64(off) {
panic("invalid ValAndOff.addOffset64")
}
return ValAndOff(makeValAndOff(x.Val(), x.Off()+off))
return makeValAndOff64(x.Val(), x.Off()+off)
}
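A freestanding sketch of the ValAndOff packing that the remaining constructors above rely on: val lives in the high 32 bits, off (reinterpreted as uint32) in the low 32. The accessors here are assumed minimal copies for illustration, not the compiler's own file:
package main

import "fmt"

type ValAndOff int64

func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
func (x ValAndOff) Off() int64 { return int64(int32(x)) }

// Pack val and off into one int64, as makeValAndOff32 does above.
func makeValAndOff32(val, off int32) ValAndOff {
	return ValAndOff(int64(val)<<32 + int64(uint32(off)))
}

func main() {
	x := makeValAndOff32(7, -4)
	fmt.Println(x.Val(), x.Off()) // 7 -4: the sign of off survives the round trip
}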
// int128 is a type that stores a 128-bit constant.

View file

@ -1034,7 +1034,9 @@ const (
OpAMD64CMPXCHGLlock
OpAMD64CMPXCHGQlock
OpAMD64ANDBlock
OpAMD64ANDLlock
OpAMD64ORBlock
OpAMD64ORLlock
OpARMADD
OpARMADDconst
@ -1586,7 +1588,9 @@ const (
OpARM64LoweredAtomicCas64
OpARM64LoweredAtomicCas32
OpARM64LoweredAtomicAnd8
OpARM64LoweredAtomicAnd32
OpARM64LoweredAtomicOr8
OpARM64LoweredAtomicOr32
OpARM64LoweredWB
OpARM64LoweredPanicBoundsA
OpARM64LoweredPanicBoundsB
@ -1867,6 +1871,9 @@ const (
OpPPC64ROTLconst
OpPPC64ROTLWconst
OpPPC64EXTSWSLconst
OpPPC64RLWINM
OpPPC64RLWNM
OpPPC64RLWMI
OpPPC64CNTLZD
OpPPC64CNTLZW
OpPPC64CNTTZD
@ -2022,7 +2029,9 @@ const (
OpPPC64LoweredAtomicCas64
OpPPC64LoweredAtomicCas32
OpPPC64LoweredAtomicAnd8
OpPPC64LoweredAtomicAnd32
OpPPC64LoweredAtomicOr8
OpPPC64LoweredAtomicOr32
OpPPC64LoweredWB
OpPPC64LoweredPanicBoundsA
OpPPC64LoweredPanicBoundsB
@ -2071,6 +2080,14 @@ const (
OpRISCV64MOVHstorezero
OpRISCV64MOVWstorezero
OpRISCV64MOVDstorezero
OpRISCV64MOVBreg
OpRISCV64MOVHreg
OpRISCV64MOVWreg
OpRISCV64MOVDreg
OpRISCV64MOVBUreg
OpRISCV64MOVHUreg
OpRISCV64MOVWUreg
OpRISCV64MOVDnop
OpRISCV64SLL
OpRISCV64SRA
OpRISCV64SRL
@ -2094,6 +2111,8 @@ const (
OpRISCV64CALLstatic
OpRISCV64CALLclosure
OpRISCV64CALLinter
OpRISCV64DUFFZERO
OpRISCV64DUFFCOPY
OpRISCV64LoweredZero
OpRISCV64LoweredMove
OpRISCV64LoweredAtomicLoad8
@ -2368,8 +2387,10 @@ const (
OpS390XLAAG
OpS390XAddTupleFirst32
OpS390XAddTupleFirst64
OpS390XLAOfloor
OpS390XLAN
OpS390XLANfloor
OpS390XLAO
OpS390XLAOfloor
OpS390XLoweredAtomicCas32
OpS390XLoweredAtomicCas64
OpS390XLoweredAtomicExchange32
@ -2834,6 +2855,7 @@ const (
OpSelect1
OpSelectN
OpSelectNAddr
OpMakeResult
OpAtomicLoad8
OpAtomicLoad32
OpAtomicLoad64
@ -2854,7 +2876,9 @@ const (
OpAtomicCompareAndSwap64
OpAtomicCompareAndSwapRel32
OpAtomicAnd8
OpAtomicAnd32
OpAtomicOr8
OpAtomicOr32
OpAtomicAdd32Variant
OpAtomicAdd64Variant
OpClobber
@ -13575,6 +13599,22 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "ANDLlock",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
hasSideEffects: true,
symEffect: SymRdWr,
asm: x86.AANDL,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "ORBlock",
auxType: auxSymOff,
@ -13591,6 +13631,22 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "ORLlock",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
hasSideEffects: true,
symEffect: SymRdWr,
asm: x86.AORL,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "ADD",
@ -21060,6 +21116,24 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "LoweredAtomicAnd32",
argLen: 3,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
unsafePoint: true,
asm: arm64.AAND,
reg: regInfo{
inputs: []inputInfo{
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "LoweredAtomicOr8",
argLen: 3,
@ -21078,6 +21152,24 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "LoweredAtomicOr32",
argLen: 3,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
unsafePoint: true,
asm: arm64.AORR,
reg: regInfo{
inputs: []inputInfo{
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "LoweredWB",
auxType: auxSym,
@ -24885,6 +24977,51 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "RLWINM",
auxType: auxInt64,
argLen: 1,
asm: ppc64.ARLWNM,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "RLWNM",
auxType: auxInt64,
argLen: 2,
asm: ppc64.ARLWNM,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "RLWMI",
auxType: auxInt64,
argLen: 2,
resultInArg0: true,
asm: ppc64.ARLWMI,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "CNTLZD",
argLen: 1,
@ -26948,6 +27085,19 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "LoweredAtomicAnd32",
argLen: 3,
faultOnNilArg0: true,
hasSideEffects: true,
asm: ppc64.AAND,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "LoweredAtomicOr8",
argLen: 3,
@ -26961,6 +27111,19 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "LoweredAtomicOr32",
argLen: 3,
faultOnNilArg0: true,
hasSideEffects: true,
asm: ppc64.AOR,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "LoweredWB",
auxType: auxSym,
@ -27585,6 +27748,110 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVBreg",
argLen: 1,
asm: riscv.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVHreg",
argLen: 1,
asm: riscv.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVWreg",
argLen: 1,
asm: riscv.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVDreg",
argLen: 1,
asm: riscv.AMOV,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVBUreg",
argLen: 1,
asm: riscv.AMOVBU,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVHUreg",
argLen: 1,
asm: riscv.AMOVHU,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVWUreg",
argLen: 1,
asm: riscv.AMOVWU,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "MOVDnop",
argLen: 1,
resultInArg0: true,
reg: regInfo{
inputs: []inputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
{
name: "SLL",
argLen: 2,
@ -27898,6 +28165,32 @@ var opcodeTable = [...]opInfo{
clobbers: 9223372035781033972, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
{
name: "DUFFZERO",
auxType: auxInt64,
argLen: 2,
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
{0, 512}, // X10
},
clobbers: 512, // X10
},
},
{
name: "DUFFCOPY",
auxType: auxInt64,
argLen: 3,
faultOnNilArg0: true,
faultOnNilArg1: true,
reg: regInfo{
inputs: []inputInfo{
{0, 1024}, // X11
{1, 512}, // X10
},
clobbers: 1536, // X10 X11
},
},
{
name: "LoweredZero",
auxType: auxInt64,
@ -31803,11 +32096,24 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
name: "LAOfloor",
name: "LAN",
argLen: 3,
clobberFlags: true,
hasSideEffects: true,
asm: s390x.ALAO,
asm: s390x.ALAN,
reg: regInfo{
inputs: []inputInfo{
{0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
{1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
},
},
},
{
name: "LANfloor",
argLen: 3,
clobberFlags: true,
hasSideEffects: true,
asm: s390x.ALAN,
reg: regInfo{
inputs: []inputInfo{
{0, 2}, // R1
@ -31817,11 +32123,24 @@ var opcodeTable = [...]opInfo{
},
},
{
name: "LANfloor",
name: "LAO",
argLen: 3,
clobberFlags: true,
hasSideEffects: true,
asm: s390x.ALAN,
asm: s390x.ALAO,
reg: regInfo{
inputs: []inputInfo{
{0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
{1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
},
},
},
{
name: "LAOfloor",
argLen: 3,
clobberFlags: true,
hasSideEffects: true,
asm: s390x.ALAO,
reg: regInfo{
inputs: []inputInfo{
{0, 2}, // R1
@ -35406,6 +35725,11 @@ var opcodeTable = [...]opInfo{
argLen: 1,
generic: true,
},
{
name: "MakeResult",
argLen: -1,
generic: true,
},
{
name: "AtomicLoad8",
argLen: 2,
@ -35520,12 +35844,24 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
generic: true,
},
{
name: "AtomicAnd32",
argLen: 3,
hasSideEffects: true,
generic: true,
},
{
name: "AtomicOr8",
argLen: 3,
hasSideEffects: true,
generic: true,
},
{
name: "AtomicOr32",
argLen: 3,
hasSideEffects: true,
generic: true,
},
{
name: "AtomicAdd32Variant",
argLen: 3,

View file

@ -212,21 +212,7 @@ func isSigned(t *types.Type) bool {
// mergeSym merges two symbolic offsets. There is no real merging of
// offsets; we just pick the non-nil one.
func mergeSym(x, y interface{}) interface{} {
if x == nil {
return y
}
if y == nil {
return x
}
panic(fmt.Sprintf("mergeSym with two non-nil syms %s %s", x, y))
}
func canMergeSym(x, y interface{}) bool {
return x == nil || y == nil
}
func mergeSymTyped(x, y Sym) Sym {
func mergeSym(x, y Sym) Sym {
if x == nil {
return y
}
@ -236,6 +222,10 @@ func mergeSymTyped(x, y Sym) Sym {
panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
}
func canMergeSym(x, y Sym) bool {
return x == nil || y == nil
}
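A freestanding toy (Sym stubbed as a local interface; names assumed) showing the contract that the generated rewrite files below depend on: guard with canMergeSym, then mergeSym picks the non-nil operand.
package main

import "fmt"

type Sym interface{ String() string }

type testSym string

func (s testSym) String() string { return string(s) }

func canMergeSym(x, y Sym) bool { return x == nil || y == nil }

func mergeSym(x, y Sym) Sym {
	if x == nil {
		return y
	}
	if y == nil {
		return x
	}
	panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
}

func main() {
	var none Sym
	fmt.Println(mergeSym(none, testSym("x")))            // x
	fmt.Println(canMergeSym(testSym("a"), testSym("b"))) // false: caller must not merge
}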
// canMergeLoadClobber reports whether the load can be merged into target without
// invalidating the schedule.
// It also checks that the other non-load argument x is something we
@ -422,12 +412,6 @@ func nto(x int64) int64 {
return int64(ntz64(^x))
}
// log2 returns logarithm in base 2 of uint64(n), with log2(0) = -1.
// Rounds down.
func log2(n int64) int64 {
return int64(bits.Len64(uint64(n))) - 1
}
// logX returns logarithm of n base 2.
// n must be a positive power of 2 (isPowerOfTwoX returns true).
func log8(n int8) int64 {
@ -449,10 +433,7 @@ func log2uint32(n int64) int64 {
return int64(bits.Len32(uint32(n))) - 1
}
// isPowerOfTwo reports whether n is a power of 2.
func isPowerOfTwo(n int64) bool {
return n > 0 && n&(n-1) == 0
}
// isPowerOfTwo functions report whether n is a power of 2.
func isPowerOfTwo8(n int8) bool {
return n > 0 && n&(n-1) == 0
}
@ -1381,6 +1362,71 @@ func GetPPC64Shiftme(auxint int64) int64 {
return int64(int8(auxint))
}
// Test if this value can be encoded as a mask for a rlwinm-like
// operation. Masks can also extend from the msb and wrap to
// the lsb. That is, the valid masks are 32-bit strings
// of the form: 0..01..10..0 or 1..10..01..1 or 1...1
func isPPC64WordRotateMask(v64 int64) bool {
// Isolate the rightmost 1 bit (0 if none) and add.
v := uint32(v64)
vp := (v & -v) + v
// Likewise, for the wrapping case.
vn := ^v
vpn := (vn & -vn) + vn
return (v&vp == 0 || vn&vpn == 0) && v != 0
}
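A standalone restatement of the predicate for experimentation (assumed copy of the function above): (v & -v) + v carries past the rightmost run of ones, so a single contiguous run leaves v&vp == 0, and repeating the test on ^v accepts masks that wrap around the word:
package main

import "fmt"

func isWordRotateMask(v64 int64) bool {
	v := uint32(v64)
	vp := (v & -v) + v // carry past the rightmost run of ones
	vn := ^v
	vpn := (vn & -vn) + vn // same trick on the complement (wrapping case)
	return (v&vp == 0 || vn&vpn == 0) && v != 0
}

func main() {
	fmt.Println(isWordRotateMask(0x0000FF00)) // true: one contiguous run
	fmt.Println(isWordRotateMask(0xFF0000FF)) // true: run wraps msb->lsb
	fmt.Println(isWordRotateMask(0x00FF00FF)) // false: two separate runs
}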
// Compress mask and shift into a single value of the form
// me | mb<<8 | rotate<<16 | nbits<<24 where me and mb can
// be used to regenerate the input mask.
func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {
var mb, me, mbn, men int
// Determine boundaries and then decode them
if mask == 0 || ^mask == 0 || rotate >= nbits {
panic("Invalid PPC64 rotate mask")
} else if nbits == 32 {
mb = bits.LeadingZeros32(uint32(mask))
me = 32 - bits.TrailingZeros32(uint32(mask))
mbn = bits.LeadingZeros32(^uint32(mask))
men = 32 - bits.TrailingZeros32(^uint32(mask))
} else {
mb = bits.LeadingZeros64(uint64(mask))
me = 64 - bits.TrailingZeros64(uint64(mask))
mbn = bits.LeadingZeros64(^uint64(mask))
men = 64 - bits.TrailingZeros64(^uint64(mask))
}
// Check for a wrapping mask (e.g. bits at 0 and 63)
if mb == 0 && me == int(nbits) {
// swap the inverted values
mb, me = men, mbn
}
return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
}
// The inverse operation of encodePPC64RotateMask. The values returned as
// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
auxint := uint64(sauxint)
rotate = int64((auxint >> 16) & 0xFF)
mb = int64((auxint >> 8) & 0xFF)
me = int64((auxint >> 0) & 0xFF)
nbits := int64((auxint >> 24) & 0xFF)
mask = ((1 << uint(nbits-mb)) - 1) ^ ((1 << uint(nbits-me)) - 1)
if mb > me {
mask = ^mask
}
if nbits == 32 {
mask = uint64(uint32(mask))
}
// Fixup ME to match ISA definition. The second argument to MASK(..,me)
// is inclusive.
me = (me - 1) & (nbits - 1)
return
}
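A worked example of the packing, computed by hand from the definitions above (illustrative values): mask = 0x0000FFF0 with rotate = 4 and nbits = 32 gives mb = 16 (leading zeros of the mask) and me = 28 (32 minus its trailing zeros), so
encodePPC64RotateMask(4, 0x0000FFF0, 32) = 28 | 16<<8 | 4<<16 | 32<<24 = 0x2004101C
and DecodePPC64RotateMask(0x2004101C) recovers rotate = 4, mb = 16, me = 27 (the fixup makes me inclusive) and mask = 0x0000FFF0.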
// isPPC64ValidShiftMask verifies that the mask
// occupies the rightmost bits.
func isPPC64ValidShiftMask(v int64) bool {
@ -1394,6 +1440,78 @@ func getPPC64ShiftMaskLength(v int64) int64 {
return int64(bits.Len64(uint64(v)))
}
// Decompose a shift right into an equivalent rotate/mask,
// and return mask & m.
func mergePPC64RShiftMask(m, s, nbits int64) int64 {
smask := uint64((1<<uint(nbits))-1) >> uint(s)
return m & int64(smask)
}
// Combine (ANDconst [m] (SRWconst [s])) into (RLWINM [y]) or return 0
func mergePPC64AndSrwi(m, s int64) int64 {
mask := mergePPC64RShiftMask(m, s, 32)
if !isPPC64WordRotateMask(mask) {
return 0
}
return encodePPC64RotateMask(32-s, mask, 32)
}
// Test if a shift right feeding into a CLRLSLDI can be merged into RLWINM.
// Return the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
mask_1 := uint64(0xFFFFFFFF >> uint(srw))
// for CLRLSLDI, it's more convenient to think of it as masking the left bits, then rotating left.
mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
// Rewrite mask to apply after the final left shift.
mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
r_1 := 32 - srw
r_2 := GetPPC64Shiftsh(sld)
r_3 := (r_1 + r_2) & 31 // This can wrap.
if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 {
return 0
}
return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32)
}
// Test if an RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return
// the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {
r_1, _, _, mask_1 := DecodePPC64RotateMask(rlw)
// for CLRLSLDI, it's more convenient to think of it as masking the left bits, then rotating left.
mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
// combine the masks, and adjust for the final left shift.
mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(int64(sld)))
r_2 := GetPPC64Shiftsh(int64(sld))
r_3 := (r_1 + r_2) & 31 // This can wrap.
// Verify the result is still a valid bitmask of <= 32 bits.
if !isPPC64WordRotateMask(int64(mask_3)) || uint64(uint32(mask_3)) != mask_3 {
return 0
}
return encodePPC64RotateMask(r_3, int64(mask_3), 32)
}
// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)),
// or return 0 if they cannot be combined.
func mergePPC64SldiSrw(sld, srw int64) int64 {
if sld > srw || srw >= 32 {
return 0
}
mask_r := uint32(0xFFFFFFFF) >> uint(srw)
mask_l := uint32(0xFFFFFFFF) >> uint(sld)
mask := (mask_r & mask_l) << uint(sld)
return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32)
}
// Convenience function to rotate a 32 bit constant value by another constant.
func rotateLeft32(v, rotate int64) int64 {
return int64(bits.RotateLeft32(uint32(v), int(rotate)))
}
// armBFAuxInt encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
func armBFAuxInt(lsb, width int64) arm64BitField {
if lsb < 0 || lsb > 63 {
@ -1418,7 +1536,7 @@ func (bfc arm64BitField) getARM64BFwidth() int64 {
// isARM64BFMask checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
func isARM64BFMask(lsb, mask, rshift int64) bool {
shiftedMask := int64(uint64(mask) >> uint64(rshift))
return shiftedMask != 0 && isPowerOfTwo(shiftedMask+1) && nto(shiftedMask)+lsb < 64
return shiftedMask != 0 && isPowerOfTwo64(shiftedMask+1) && nto(shiftedMask)+lsb < 64
}
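The isPowerOfTwo64(shiftedMask+1) clause accepts exactly the masks whose set bits form one run starting at bit 0. A hand-worked case (illustrative): mask = 0x0FF0 with rshift = 4 gives shiftedMask = 0xFF, and 0xFF+1 = 0x100 is a power of two, so the mask is a valid bitfield; mask = 0x0F0F fails, since 0x0F0F>>4 = 0xF0 and 0xF0+1 = 0xF1 is not a power of two.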
// returns the bitfield width of mask >> rshift for arm64 bitfield ops
@ -1455,18 +1573,18 @@ func needRaceCleanup(sym *AuxCall, v *Value) bool {
if !f.Config.Race {
return false
}
if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncexit") {
if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncenterfp") && !isSameCall(sym, "runtime.racefuncexit") {
return false
}
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
case OpStaticCall:
// Check for racefuncenter will encounter racefuncexit and vice versa.
// A check for racefuncenter/racefuncenterfp will also encounter racefuncexit and vice versa.
// Allow calls to panic*
s := v.Aux.(*AuxCall).Fn.String()
switch s {
case "runtime.racefuncenter", "runtime.racefuncexit",
case "runtime.racefuncenter", "runtime.racefuncenterfp", "runtime.racefuncexit",
"runtime.panicdivide", "runtime.panicwrap",
"runtime.panicshift":
continue

View file

@ -1179,7 +1179,7 @@ func rewriteValue386_Op386ADDLconstmodify(v *Value) bool {
}
// match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ADDLconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
// result: (ADDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
for {
valoff1 := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -1195,7 +1195,7 @@ func rewriteValue386_Op386ADDLconstmodify(v *Value) bool {
}
v.reset(Op386ADDLconstmodify)
v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
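Each generated matcher above corresponds one-to-one to a typed rule; reconstructed from the match/cond/result comments (layout illustrative, not quoted from the 386.rules file):
(ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
	&& valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
	=> (ADDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)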
@ -1231,7 +1231,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool {
}
// match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ADDLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -1248,7 +1248,7 @@ func rewriteValue386_Op386ADDLload(v *Value) bool {
}
v.reset(Op386ADDLload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@ -1284,7 +1284,7 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool {
}
// match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ADDLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -1301,7 +1301,7 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool {
}
v.reset(Op386ADDLmodify)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@ -1367,7 +1367,7 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool {
}
// match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ADDSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -1384,7 +1384,7 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool {
}
v.reset(Op386ADDSDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@ -1450,7 +1450,7 @@ func rewriteValue386_Op386ADDSSload(v *Value) bool {
}
// match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ADDSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -1467,7 +1467,7 @@ func rewriteValue386_Op386ADDSSload(v *Value) bool {
}
v.reset(Op386ADDSSload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@ -1611,7 +1611,7 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool {
}
// match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ANDLconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
// result: (ANDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
for {
valoff1 := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -1627,7 +1627,7 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool {
}
v.reset(Op386ANDLconstmodify)
v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@ -1663,7 +1663,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool {
}
// match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ANDLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -1680,7 +1680,7 @@ func rewriteValue386_Op386ANDLload(v *Value) bool {
}
v.reset(Op386ANDLload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@ -1716,7 +1716,7 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool {
}
// match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ANDLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -1733,7 +1733,7 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool {
}
v.reset(Op386ANDLmodify)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@ -2690,7 +2690,7 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool {
}
// match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (DIVSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -2707,7 +2707,7 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool {
}
v.reset(Op386DIVSDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@ -2770,7 +2770,7 @@ func rewriteValue386_Op386DIVSSload(v *Value) bool {
}
// match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (DIVSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -2787,7 +2787,7 @@ func rewriteValue386_Op386DIVSSload(v *Value) bool {
}
v.reset(Op386DIVSSload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@ -2843,7 +2843,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
}
// match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAL [off1+off2] {mergeSymTyped(sym1,sym2)} x)
// result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -2858,13 +2858,13 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
}
v.reset(Op386LEAL)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg(x)
return true
}
// match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -2880,13 +2880,13 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
}
v.reset(Op386LEAL1)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -2902,13 +2902,13 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
}
v.reset(Op386LEAL2)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -2924,13 +2924,13 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
}
v.reset(Op386LEAL4)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -2946,7 +2946,7 @@ func rewriteValue386_Op386LEAL(v *Value) bool {
}
v.reset(Op386LEAL8)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
@ -3038,7 +3038,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
}
// match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAL1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3055,7 +3055,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
}
v.reset(Op386LEAL1)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
@ -3063,7 +3063,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
}
// match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
// result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3080,7 +3080,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
}
v.reset(Op386LEAL2)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
@ -3088,7 +3088,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
}
// match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAL2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
// result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3112,7 +3112,7 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
}
v.reset(Op386LEAL2)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(y, x)
return true
}
@ -3212,7 +3212,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool {
}
// match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAL2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3228,7 +3228,7 @@ func rewriteValue386_Op386LEAL2(v *Value) bool {
}
v.reset(Op386LEAL2)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
@ -3321,7 +3321,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool {
}
// match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAL4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3337,7 +3337,7 @@ func rewriteValue386_Op386LEAL4(v *Value) bool {
}
v.reset(Op386LEAL4)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
@ -3414,7 +3414,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool {
}
// match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAL8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3430,7 +3430,7 @@ func rewriteValue386_Op386LEAL8(v *Value) bool {
}
v.reset(Op386LEAL8)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
@ -3509,7 +3509,7 @@ func rewriteValue386_Op386MOVBLSXload(v *Value) bool {
}
// match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBLSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3525,7 +3525,7 @@ func rewriteValue386_Op386MOVBLSXload(v *Value) bool {
}
v.reset(Op386MOVBLSXload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@ -3621,7 +3621,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
}
// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3637,7 +3637,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
}
v.reset(Op386MOVBload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@ -3741,7 +3741,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
}
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -3758,7 +3758,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
}
v.reset(Op386MOVBstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@ -4052,7 +4052,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
}
// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
// result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
for {
sc := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4068,13 +4068,13 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
}
v.reset(Op386MOVBstoreconst)
v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
// cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
// result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p mem)
// result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
for {
c := auxIntToValAndOff(v.AuxInt)
s := auxToSym(v.Aux)
@ -4092,14 +4092,14 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
break
}
v.reset(Op386MOVWstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), int32(a.Off())))
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32()))
v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
// cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
// result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p mem)
// result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
for {
a := auxIntToValAndOff(v.AuxInt)
s := auxToSym(v.Aux)
@ -4117,14 +4117,14 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
break
}
v.reset(Op386MOVWstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), int32(a.Off())))
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32()))
v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// match: (MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
// cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p0 mem)
// result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
for {
c := auxIntToValAndOff(v.AuxInt)
s := auxToSym(v.Aux)
@ -4143,14 +4143,14 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
break
}
v.reset(Op386MOVWstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), int32(a.Off())))
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32()))
v.Aux = symToAux(s)
v.AddArg2(p0, mem)
return true
}
// match: (MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
// cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), int32(a.Off()))] {s} p0 mem)
// result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
for {
a := auxIntToValAndOff(v.AuxInt)
s := auxToSym(v.Aux)
@ -4169,7 +4169,7 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
break
}
v.reset(Op386MOVWstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), int32(a.Off())))
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32()))
v.Aux = symToAux(s)
v.AddArg2(p0, mem)
return true
@ -4224,7 +4224,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
}
// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4240,7 +4240,7 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
}
v.reset(Op386MOVLload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@ -4310,7 +4310,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
}
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4327,7 +4327,7 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
}
v.reset(Op386MOVLstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@ -4719,7 +4719,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
}
// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
// result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
for {
sc := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4735,7 +4735,7 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
}
v.reset(Op386MOVLstoreconst)
v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@ -4789,7 +4789,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool {
}
// match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVSDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4805,7 +4805,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool {
}
v.reset(Op386MOVSDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@ -4841,7 +4841,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool {
}
// match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVSDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4858,7 +4858,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool {
}
v.reset(Op386MOVSDstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@ -4912,7 +4912,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool {
}
// match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVSSload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4928,7 +4928,7 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool {
}
v.reset(Op386MOVSSload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@ -4964,7 +4964,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool {
}
// match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVSSstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4981,7 +4981,7 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool {
}
v.reset(Op386MOVSSstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@ -5060,7 +5060,7 @@ func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
}
// match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWLSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5076,7 +5076,7 @@ func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
}
v.reset(Op386MOVWLSXload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@ -5172,7 +5172,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool {
}
// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5188,7 +5188,7 @@ func rewriteValue386_Op386MOVWload(v *Value) bool {
}
v.reset(Op386MOVWload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@ -5292,7 +5292,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
}
// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5309,7 +5309,7 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
}
v.reset(Op386MOVWstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@ -5452,7 +5452,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
}
// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
// result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
for {
sc := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5468,13 +5468,13 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
}
v.reset(Op386MOVWstoreconst)
v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
// cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
// result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p mem)
// result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
for {
c := auxIntToValAndOff(v.AuxInt)
s := auxToSym(v.Aux)
@ -5492,14 +5492,14 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
break
}
v.reset(Op386MOVLstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), int32(a.Off())))
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32()))
v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
// result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p mem)
// result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
for {
a := auxIntToValAndOff(v.AuxInt)
s := auxToSym(v.Aux)
@ -5517,14 +5517,14 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
break
}
v.reset(Op386MOVLstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), int32(a.Off())))
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32()))
v.Aux = symToAux(s)
v.AddArg2(p, mem)
return true
}
// match: (MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
// cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)
// result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p0 mem)
// result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
for {
c := auxIntToValAndOff(v.AuxInt)
s := auxToSym(v.Aux)
@ -5543,14 +5543,14 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
break
}
v.reset(Op386MOVLstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), int32(a.Off())))
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32()))
v.Aux = symToAux(s)
v.AddArg2(p0, mem)
return true
}
// match: (MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
// cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)
// result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), int32(a.Off()))] {s} p0 mem)
// result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
for {
a := auxIntToValAndOff(v.AuxInt)
s := auxToSym(v.Aux)
@ -5569,7 +5569,7 @@ func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
break
}
v.reset(Op386MOVLstoreconst)
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), int32(a.Off())))
v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32()))
v.Aux = symToAux(s)
v.AddArg2(p0, mem)
return true
@ -6070,7 +6070,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool {
}
// match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MULLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -6087,7 +6087,7 @@ func rewriteValue386_Op386MULLload(v *Value) bool {
}
v.reset(Op386MULLload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@ -6153,7 +6153,7 @@ func rewriteValue386_Op386MULSDload(v *Value) bool {
}
// match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MULSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -6170,7 +6170,7 @@ func rewriteValue386_Op386MULSDload(v *Value) bool {
}
v.reset(Op386MULSDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@@ -6236,7 +6236,7 @@ func rewriteValue386_Op386MULSSload(v *Value) bool {
}
// match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MULSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -6253,7 +6253,7 @@ func rewriteValue386_Op386MULSSload(v *Value) bool {
}
v.reset(Op386MULSSload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@@ -6686,7 +6686,7 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool {
}
// match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ORLconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
// result: (ORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
for {
valoff1 := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -6702,7 +6702,7 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool {
}
v.reset(Op386ORLconstmodify)
v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -6738,7 +6738,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool {
}
// match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ORLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -6755,7 +6755,7 @@ func rewriteValue386_Op386ORLload(v *Value) bool {
}
v.reset(Op386ORLload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@@ -6791,7 +6791,7 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool {
}
// match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (ORLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -6808,7 +6808,7 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool {
}
v.reset(Op386ORLmodify)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -8096,7 +8096,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool {
}
// match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (SUBLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8113,7 +8113,7 @@ func rewriteValue386_Op386SUBLload(v *Value) bool {
}
v.reset(Op386SUBLload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@@ -8149,7 +8149,7 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool {
}
// match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (SUBLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8166,7 +8166,7 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool {
}
v.reset(Op386SUBLmodify)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -8229,7 +8229,7 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool {
}
// match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (SUBSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8246,7 +8246,7 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool {
}
v.reset(Op386SUBSDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@@ -8309,7 +8309,7 @@ func rewriteValue386_Op386SUBSSload(v *Value) bool {
}
// match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (SUBSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8326,7 +8326,7 @@ func rewriteValue386_Op386SUBSSload(v *Value) bool {
}
v.reset(Op386SUBSSload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@@ -8533,7 +8533,7 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool {
}
// match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (XORLconstmodify [valoff1.addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
// result: (XORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
for {
valoff1 := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8549,7 +8549,7 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool {
}
v.reset(Op386XORLconstmodify)
v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -8585,7 +8585,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool {
}
// match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (XORLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8602,7 +8602,7 @@ func rewriteValue386_Op386XORLload(v *Value) bool {
}
v.reset(Op386XORLload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(val, base, mem)
return true
}
@@ -8638,7 +8638,7 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool {
}
// match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (XORLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8655,7 +8655,7 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool {
}
v.reset(Op386XORLmodify)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -8696,11 +8696,11 @@ func rewriteValue386_OpConst8(v *Value) bool {
}
func rewriteValue386_OpConstBool(v *Value) bool {
// match: (ConstBool [c])
// result: (MOVLconst [int32(b2i(c))])
// result: (MOVLconst [b2i32(c)])
for {
c := auxIntToBool(v.AuxInt)
v.reset(Op386MOVLconst)
v.AuxInt = int32ToAuxInt(int32(b2i(c)))
v.AuxInt = int32ToAuxInt(b2i32(c))
return true
}
}
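// The ConstBool lowering above stores the bool as a 0/1 immediate; a
// minimal sketch of b2i32 as assumed from its use here (an int32-typed
// variant of the older b2i helper):
func b2i32(b bool) int32 {
	if b {
		return 1
	}
	return 0
}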

File diff suppressed because it is too large

src/cmd/compile/internal/ssa/rewriteARM.go

@@ -448,8 +448,7 @@ func rewriteValueARM(v *Value) bool {
v.Op = OpARMADD
return true
case OpAddr:
v.Op = OpARMMOVWaddr
return true
return rewriteValueARM_OpAddr(v)
case OpAnd16:
v.Op = OpARMAND
return true
@@ -481,23 +480,17 @@ func rewriteValueARM(v *Value) bool {
v.Op = OpARMMVN
return true
case OpConst16:
v.Op = OpARMMOVWconst
return true
return rewriteValueARM_OpConst16(v)
case OpConst32:
v.Op = OpARMMOVWconst
return true
return rewriteValueARM_OpConst32(v)
case OpConst32F:
v.Op = OpARMMOVFconst
return true
return rewriteValueARM_OpConst32F(v)
case OpConst64F:
v.Op = OpARMMOVDconst
return true
return rewriteValueARM_OpConst64F(v)
case OpConst8:
v.Op = OpARMMOVWconst
return true
return rewriteValueARM_OpConst8(v)
case OpConstBool:
v.Op = OpARMMOVWconst
return true
return rewriteValueARM_OpConstBool(v)
case OpConstNil:
return rewriteValueARM_OpConstNil(v)
case OpCtz16:
@@ -4536,7 +4529,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
}
// match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4552,7 +4545,7 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
}
v.reset(OpARMMOVBUload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -4600,15 +4593,15 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
}
// match: (MOVBUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVWconst [int64(read8(sym, off))])
// result: (MOVWconst [int32(read8(sym, int64(off)))])
for {
off := v.AuxInt
sym := v.Aux
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
v.AuxInt = int64(read8(sym, off))
v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
return true
}
return false
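// The (MOVBUload [off] {sym} (SB) _) fold above turns a load from a
// read-only symbol into a constant. A sketch of read8 under the
// assumption that the symbol's raw bytes are reachable as an *obj.LSym
// (out-of-range offsets can occur in dead code, so they fold to 0
// instead of panicking):
func read8(sym interface{}, off int64) uint8 {
	lsym := sym.(*obj.LSym) // needs cmd/internal/obj; data lives in lsym.P
	if off < 0 || off >= int64(len(lsym.P)) {
		return 0 // dead-code load; any value is fine
	}
	return lsym.P[off]
}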
@@ -4754,7 +4747,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool {
}
// match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4770,7 +4763,7 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool {
}
v.reset(OpARMMOVBload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -4966,7 +4959,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
}
// match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -4983,7 +4976,7 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
}
v.reset(OpARMMOVBstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -5155,7 +5148,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool {
}
// match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5171,7 +5164,7 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool {
}
v.reset(OpARMMOVDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -5239,7 +5232,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
}
// match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5256,7 +5249,7 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
}
v.reset(OpARMMOVDstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -5301,7 +5294,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool {
}
// match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5317,7 +5310,7 @@ func rewriteValueARM_OpARMMOVFload(v *Value) bool {
}
v.reset(OpARMMOVFload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -5385,7 +5378,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
}
// match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5402,7 +5395,7 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
}
v.reset(OpARMMOVFstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -5449,7 +5442,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
}
// match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5465,7 +5458,7 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
}
v.reset(OpARMMOVHUload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -5513,15 +5506,15 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
}
// match: (MOVHUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVWconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
// result: (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := v.AuxInt
sym := v.Aux
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
@@ -5689,7 +5682,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool {
}
// match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5705,7 +5698,7 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool {
}
v.reset(OpARMMOVHload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -5945,7 +5938,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
}
// match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -5962,7 +5955,7 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
}
v.reset(OpARMMOVHstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -6102,7 +6095,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
}
// match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -6118,7 +6111,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
}
v.reset(OpARMMOVWload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -6234,15 +6227,15 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
}
// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVWconst [int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))])
// result: (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := v.AuxInt
sym := v.Aux
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
v.AuxInt = int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))
v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
@@ -6577,7 +6570,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
}
// match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@ -6594,7 +6587,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
}
v.reset(OpARMMOVWstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -12873,6 +12866,19 @@ func rewriteValueARM_OpARMXORshiftRR(v *Value) bool {
}
return false
}
func rewriteValueARM_OpAddr(v *Value) bool {
v_0 := v.Args[0]
// match: (Addr {sym} base)
// result: (MOVWaddr {sym} base)
for {
sym := auxToSym(v.Aux)
base := v_0
v.reset(OpARMMOVWaddr)
v.Aux = symToAux(sym)
v.AddArg(base)
return true
}
}
func rewriteValueARM_OpAvg32u(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -12954,6 +12960,66 @@ func rewriteValueARM_OpBswap32(v *Value) bool {
}
return false
}
func rewriteValueARM_OpConst16(v *Value) bool {
// match: (Const16 [val])
// result: (MOVWconst [int32(val)])
for {
val := auxIntToInt16(v.AuxInt)
v.reset(OpARMMOVWconst)
v.AuxInt = int32ToAuxInt(int32(val))
return true
}
}
func rewriteValueARM_OpConst32(v *Value) bool {
// match: (Const32 [val])
// result: (MOVWconst [int32(val)])
for {
val := auxIntToInt32(v.AuxInt)
v.reset(OpARMMOVWconst)
v.AuxInt = int32ToAuxInt(int32(val))
return true
}
}
func rewriteValueARM_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// result: (MOVFconst [float64(val)])
for {
val := auxIntToFloat32(v.AuxInt)
v.reset(OpARMMOVFconst)
v.AuxInt = float64ToAuxInt(float64(val))
return true
}
}
func rewriteValueARM_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// result: (MOVDconst [float64(val)])
for {
val := auxIntToFloat64(v.AuxInt)
v.reset(OpARMMOVDconst)
v.AuxInt = float64ToAuxInt(float64(val))
return true
}
}
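// Const32F/Const64F above round-trip floats through the int64 AuxInt.
// Sketch of the conversions this relies on, assuming the usual
// math.Float64bits encoding (float32 constants are widened to float64
// before being stored):
func float64ToAuxInt(f float64) int64 {
	return int64(math.Float64bits(f))
}
func auxIntToFloat32(i int64) float32 {
	return float32(math.Float64frombits(uint64(i)))
}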
func rewriteValueARM_OpConst8(v *Value) bool {
// match: (Const8 [val])
// result: (MOVWconst [int32(val)])
for {
val := auxIntToInt8(v.AuxInt)
v.reset(OpARMMOVWconst)
v.AuxInt = int32ToAuxInt(int32(val))
return true
}
}
func rewriteValueARM_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// result: (MOVWconst [b2i32(b)])
for {
b := auxIntToBool(v.AuxInt)
v.reset(OpARMMOVWconst)
v.AuxInt = int32ToAuxInt(b2i32(b))
return true
}
}
func rewriteValueARM_OpConstNil(v *Value) bool {
// match: (ConstNil)
// result: (MOVWconst [0])
@@ -14648,25 +14714,25 @@ func rewriteValueARM_OpNot(v *Value) bool {
func rewriteValueARM_OpOffPtr(v *Value) bool {
v_0 := v.Args[0]
// match: (OffPtr [off] ptr:(SP))
// result: (MOVWaddr [off] ptr)
// result: (MOVWaddr [int32(off)] ptr)
for {
off := v.AuxInt
off := auxIntToInt64(v.AuxInt)
ptr := v_0
if ptr.Op != OpSP {
break
}
v.reset(OpARMMOVWaddr)
v.AuxInt = off
v.AuxInt = int32ToAuxInt(int32(off))
v.AddArg(ptr)
return true
}
// match: (OffPtr [off] ptr)
// result: (ADDconst [off] ptr)
// result: (ADDconst [int32(off)] ptr)
for {
off := v.AuxInt
off := auxIntToInt64(v.AuxInt)
ptr := v_0
v.reset(OpARMADDconst)
v.AuxInt = off
v.AuxInt = int32ToAuxInt(int32(off))
v.AddArg(ptr)
return true
}
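// OffPtr carries a 64-bit offset, so the lowerings above narrow it to the
// int32 payload that ARM ops use. The typed AuxInt accessors are thin
// conversions; a sketch consistent with their use throughout this file:
func auxIntToInt64(i int64) int64 { return i }
func auxIntToInt32(i int64) int32 { return int32(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }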

File diff suppressed because it is too large

src/cmd/compile/internal/ssa/rewriteMIPS.go

@@ -44,6 +44,9 @@ func rewriteValueMIPS(v *Value) bool {
case OpAtomicAdd32:
v.Op = OpMIPSLoweredAtomicAdd
return true
case OpAtomicAnd32:
v.Op = OpMIPSLoweredAtomicAnd
return true
case OpAtomicAnd8:
return rewriteValueMIPS_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
@@ -61,6 +64,9 @@ func rewriteValueMIPS(v *Value) bool {
case OpAtomicLoadPtr:
v.Op = OpMIPSLoweredAtomicLoad32
return true
case OpAtomicOr32:
v.Op = OpMIPSLoweredAtomicOr
return true
case OpAtomicOr8:
return rewriteValueMIPS_OpAtomicOr8(v)
case OpAtomicStore32:
@@ -862,11 +868,11 @@ func rewriteValueMIPS_OpConst8(v *Value) bool {
}
func rewriteValueMIPS_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// result: (MOVWconst [int32(b2i(b))])
// result: (MOVWconst [b2i32(b)])
for {
b := auxIntToBool(v.AuxInt)
v.reset(OpMIPSMOVWconst)
v.AuxInt = int32ToAuxInt(int32(b2i(b)))
v.AuxInt = int32ToAuxInt(b2i32(b))
return true
}
}
@@ -2333,7 +2339,7 @@ func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool {
}
// match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2349,7 +2355,7 @@ func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool {
}
v.reset(OpMIPSMOVBUload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -2478,7 +2484,7 @@ func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool {
}
// match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2494,7 +2500,7 @@ func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool {
}
v.reset(OpMIPSMOVBload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -2629,7 +2635,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool {
}
// match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2646,7 +2652,7 @@ func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool {
}
v.reset(OpMIPSMOVBstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -2780,7 +2786,7 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool {
}
// match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2796,7 +2802,7 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool {
}
v.reset(OpMIPSMOVBstorezero)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -2829,7 +2835,7 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool {
}
// match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2845,7 +2851,7 @@ func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool {
}
v.reset(OpMIPSMOVDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -2900,7 +2906,7 @@ func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool {
}
// match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2917,7 +2923,7 @@ func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool {
}
v.reset(OpMIPSMOVDstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -2950,7 +2956,7 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool {
}
// match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2966,7 +2972,7 @@ func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool {
}
v.reset(OpMIPSMOVFload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3021,7 +3027,7 @@ func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool {
}
// match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3038,7 +3044,7 @@ func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool {
}
v.reset(OpMIPSMOVFstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -3071,7 +3077,7 @@ func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool {
}
// match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3087,7 +3093,7 @@ func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool {
}
v.reset(OpMIPSMOVHUload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3238,7 +3244,7 @@ func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool {
}
// match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3254,7 +3260,7 @@ func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool {
}
v.reset(OpMIPSMOVHload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3433,7 +3439,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool {
}
// match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3450,7 +3456,7 @@ func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool {
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -3550,7 +3556,7 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool {
}
// match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3566,7 +3572,7 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool {
}
v.reset(OpMIPSMOVHstorezero)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3599,7 +3605,7 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool {
}
// match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3615,7 +3621,7 @@ func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool {
}
v.reset(OpMIPSMOVWload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3697,7 +3703,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool {
}
// match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3714,7 +3720,7 @@ func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool {
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -3780,7 +3786,7 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool {
}
// match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3796,7 +3802,7 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool {
}
v.reset(OpMIPSMOVWstorezero)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3846,7 +3852,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
break
}
// match: (MUL (MOVWconst [c]) x )
// cond: isPowerOfTwo(int64(uint32(c)))
// cond: isPowerOfTwo64(int64(uint32(c)))
// result: (SLLconst [int32(log2uint32(int64(c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -3855,7 +3861,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
}
c := auxIntToInt32(v_0.AuxInt)
x := v_1
if !(isPowerOfTwo(int64(uint32(c)))) {
if !(isPowerOfTwo64(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSLLconst)
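// The strength-reduction rules above and below replace multiplication or
// division by a power of two with a shift. Sketches of the predicates
// they use, assuming the standard bit tricks (log2uint32 views its
// argument as a 32-bit value; needs math/bits):
func isPowerOfTwo64(n int64) bool {
	return n > 0 && n&(n-1) == 0
}
func log2uint32(n int64) int64 {
	return int64(bits.Len32(uint32(n))) - 1
}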
@@ -6382,7 +6388,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
break
}
// match: (Select0 (MULTU (MOVWconst [c]) x ))
// cond: isPowerOfTwo(int64(uint32(c)))
// cond: isPowerOfTwo64(int64(uint32(c)))
// result: (SRLconst [int32(32-log2uint32(int64(c)))] x)
for {
if v_0.Op != OpMIPSMULTU {
@@ -6397,7 +6403,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
}
c := auxIntToInt32(v_0_0.AuxInt)
x := v_0_1
if !(isPowerOfTwo(int64(uint32(c)))) {
if !(isPowerOfTwo64(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSRLconst)
@@ -6570,7 +6576,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
break
}
// match: (Select1 (MULTU (MOVWconst [c]) x ))
// cond: isPowerOfTwo(int64(uint32(c)))
// cond: isPowerOfTwo64(int64(uint32(c)))
// result: (SLLconst [int32(log2uint32(int64(c)))] x)
for {
if v_0.Op != OpMIPSMULTU {
@@ -6585,7 +6591,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
}
c := auxIntToInt32(v_0_0.AuxInt)
x := v_0_1
if !(isPowerOfTwo(int64(uint32(c)))) {
if !(isPowerOfTwo64(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSLLconst)

src/cmd/compile/internal/ssa/rewriteMIPS64.go

@@ -2558,7 +2558,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool {
}
// match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVBUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2574,7 +2574,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool {
}
v.reset(OpMIPS64MOVBUload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -2643,7 +2643,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool {
}
// match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVBload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2659,7 +2659,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool {
}
v.reset(OpMIPS64MOVBload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -2730,7 +2730,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVBstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2747,7 +2747,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
}
v.reset(OpMIPS64MOVBstore)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -2897,7 +2897,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
}
// match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVBstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2913,7 +2913,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
}
v.reset(OpMIPS64MOVBstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -2945,7 +2945,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
}
// match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVDload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -2961,7 +2961,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
}
v.reset(OpMIPS64MOVDload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -2995,7 +2995,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool {
}
// match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVDstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3012,7 +3012,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool {
}
v.reset(OpMIPS64MOVDstore)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -3044,7 +3044,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool {
}
// match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVFload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3060,7 +3060,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool {
}
v.reset(OpMIPS64MOVFload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3094,7 +3094,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool {
}
// match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVFstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3111,7 +3111,7 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool {
}
v.reset(OpMIPS64MOVFstore)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -3143,7 +3143,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool {
}
// match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVHUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3159,7 +3159,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool {
}
v.reset(OpMIPS64MOVHUload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3250,7 +3250,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool {
}
// match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVHload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3266,7 +3266,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool {
}
v.reset(OpMIPS64MOVHload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3381,7 +3381,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
}
// match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVHstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3398,7 +3398,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
}
v.reset(OpMIPS64MOVHstore)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -3514,7 +3514,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
}
// match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVHstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3530,7 +3530,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
}
v.reset(OpMIPS64MOVHstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3562,7 +3562,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
}
// match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVVload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3578,7 +3578,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
}
v.reset(OpMIPS64MOVVload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3639,7 +3639,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool {
}
// match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVVstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3656,7 +3656,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool {
}
v.reset(OpMIPS64MOVVstore)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -3704,7 +3704,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool {
}
// match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVVstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3720,7 +3720,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool {
}
v.reset(OpMIPS64MOVVstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3752,7 +3752,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool {
}
// match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVWUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3768,7 +3768,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool {
}
v.reset(OpMIPS64MOVWUload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -3881,7 +3881,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool {
}
// match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVWload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -3897,7 +3897,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool {
}
v.reset(OpMIPS64MOVWload)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -4045,7 +4045,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
}
// match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVWstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -4062,7 +4062,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
}
v.reset(OpMIPS64MOVWstore)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -4144,7 +4144,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool {
}
// match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVWstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -4160,7 +4160,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool {
}
v.reset(OpMIPS64MOVWstorezero)
v.AuxInt = int32ToAuxInt(off1 + int32(off2))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -6865,7 +6865,7 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool {
return true
}
// match: (Select0 (DIVVU x (MOVVconst [c])))
// cond: isPowerOfTwo(c)
// cond: isPowerOfTwo64(c)
// result: (ANDconst [c-1] x)
for {
if v_0.Op != OpMIPS64DIVVU {
@@ -6878,7 +6878,7 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool {
break
}
c := auxIntToInt64(v_0_1.AuxInt)
if !(isPowerOfTwo(c)) {
if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpMIPS64ANDconst)
@@ -7012,8 +7012,8 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
break
}
// match: (Select1 (MULVU x (MOVVconst [c])))
// cond: isPowerOfTwo(c)
// result: (SLLVconst [log2(c)] x)
// cond: isPowerOfTwo64(c)
// result: (SLLVconst [log64(c)] x)
for {
if v_0.Op != OpMIPS64MULVU {
break
@@ -7027,11 +7027,11 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
continue
}
c := auxIntToInt64(v_0_1.AuxInt)
if !(isPowerOfTwo(c)) {
if !(isPowerOfTwo64(c)) {
continue
}
v.reset(OpMIPS64SLLVconst)
v.AuxInt = int64ToAuxInt(log2(c))
v.AuxInt = int64ToAuxInt(log64(c))
v.AddArg(x)
return true
}
@@ -7053,8 +7053,8 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
return true
}
// match: (Select1 (DIVVU x (MOVVconst [c])))
// cond: isPowerOfTwo(c)
// result: (SRLVconst [log2(c)] x)
// cond: isPowerOfTwo64(c)
// result: (SRLVconst [log64(c)] x)
for {
if v_0.Op != OpMIPS64DIVVU {
break
@@ -7066,11 +7066,11 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
break
}
c := auxIntToInt64(v_0_1.AuxInt)
if !(isPowerOfTwo(c)) {
if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpMIPS64SRLVconst)
v.AuxInt = int64ToAuxInt(log2(c))
v.AuxInt = int64ToAuxInt(log64(c))
v.AddArg(x)
return true
}
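// Why the unsigned DIVVU/MULVU rules above are sound: for unsigned x and
// c == 1<<k, x*c == x<<k, x/c == x>>k, and x%c == x&(c-1); log64 recovers
// k from c. A sketch matching its use here (needs math/bits):
func log64(n int64) int64 {
	return int64(bits.Len64(uint64(n))) - 1 // bit position of the single set bit
}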

src/cmd/compile/internal/ssa/rewritePPC64.go

@@ -59,6 +59,9 @@ func rewriteValuePPC64(v *Value) bool {
case OpAtomicAdd64:
v.Op = OpPPC64LoweredAtomicAdd64
return true
case OpAtomicAnd32:
v.Op = OpPPC64LoweredAtomicAnd32
return true
case OpAtomicAnd8:
v.Op = OpPPC64LoweredAtomicAnd8
return true
@@ -86,6 +89,9 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpAtomicLoadAcq64(v)
case OpAtomicLoadPtr:
return rewriteValuePPC64_OpAtomicLoadPtr(v)
case OpAtomicOr32:
v.Op = OpPPC64LoweredAtomicOr32
return true
case OpAtomicOr8:
v.Op = OpPPC64LoweredAtomicOr8
return true
@@ -438,6 +444,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64ANDN(v)
case OpPPC64ANDconst:
return rewriteValuePPC64_OpPPC64ANDconst(v)
case OpPPC64CLRLSLDI:
return rewriteValuePPC64_OpPPC64CLRLSLDI(v)
case OpPPC64CMP:
return rewriteValuePPC64_OpPPC64CMP(v)
case OpPPC64CMPU:
@@ -592,6 +600,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64ROTL(v)
case OpPPC64ROTLW:
return rewriteValuePPC64_OpPPC64ROTLW(v)
case OpPPC64ROTLWconst:
return rewriteValuePPC64_OpPPC64ROTLWconst(v)
case OpPPC64SLD:
return rewriteValuePPC64_OpPPC64SLD(v)
case OpPPC64SLDconst:
@@ -608,6 +618,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64SRD(v)
case OpPPC64SRW:
return rewriteValuePPC64_OpPPC64SRW(v)
case OpPPC64SRWconst:
return rewriteValuePPC64_OpPPC64SRWconst(v)
case OpPPC64SUB:
return rewriteValuePPC64_OpPPC64SUB(v)
case OpPPC64SUBFCconst:
@@ -4206,6 +4218,100 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool {
func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (AND (MOVDconst [m]) (ROTLWconst [r] x))
// cond: isPPC64WordRotateMask(m)
// result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64MOVDconst {
continue
}
m := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64ROTLWconst {
continue
}
r := auxIntToInt64(v_1.AuxInt)
x := v_1.Args[0]
if !(isPPC64WordRotateMask(m)) {
continue
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
v.AddArg(x)
return true
}
break
}
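// The predicate above accepts the 32-bit masks RLWINM can encode: a
// nonzero run of contiguous ones that may wrap around bit 0. A hedged
// sketch of the check and of RLWINM's reference semantics (not the
// compiler's exact bit-trick implementation; needs math/bits):
func isContiguousRun(v uint32) bool {
	return v != 0 && v&(v+(v&-v)) == 0 // the ones form one non-wrapping run
}
func isWordRotateMaskSketch(m int64) bool {
	v := uint32(m)
	return v != 0 && (isContiguousRun(v) || isContiguousRun(^v))
}
func rlwinm(x uint32, rotate int, mask uint32) uint32 {
	return bits.RotateLeft32(x, rotate) & mask
}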
// match: (AND (MOVDconst [m]) (ROTLW x r))
// cond: isPPC64WordRotateMask(m)
// result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64MOVDconst {
continue
}
m := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64ROTLW {
continue
}
r := v_1.Args[1]
x := v_1.Args[0]
if !(isPPC64WordRotateMask(m)) {
continue
}
v.reset(OpPPC64RLWNM)
v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
v.AddArg2(x, r)
return true
}
break
}
// match: (AND (MOVDconst [m]) (SRWconst x [s]))
// cond: mergePPC64RShiftMask(m,s,32) == 0
// result: (MOVDconst [0])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64MOVDconst {
continue
}
m := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64SRWconst {
continue
}
s := auxIntToInt64(v_1.AuxInt)
if !(mergePPC64RShiftMask(m, s, 32) == 0) {
continue
}
v.reset(OpPPC64MOVDconst)
v.AuxInt = int64ToAuxInt(0)
return true
}
break
}
// match: (AND (MOVDconst [m]) (SRWconst x [s]))
// cond: mergePPC64AndSrwi(m,s) != 0
// result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64MOVDconst {
continue
}
m := auxIntToInt64(v_0.AuxInt)
if v_1.Op != OpPPC64SRWconst {
continue
}
s := auxIntToInt64(v_1.AuxInt)
x := v_1.Args[0]
if !(mergePPC64AndSrwi(m, s) != 0) {
continue
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
v.AddArg(x)
return true
}
break
}
// match: (AND x (NOR y y))
// result: (ANDN x y)
for {
@@ -4341,6 +4447,76 @@ func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
v_0 := v.Args[0]
// match: (ANDconst [m] (ROTLWconst [r] x))
// cond: isPPC64WordRotateMask(m)
// result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
for {
m := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64ROTLWconst {
break
}
r := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isPPC64WordRotateMask(m)) {
break
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
v.AddArg(x)
return true
}
// match: (ANDconst [m] (ROTLW x r))
// cond: isPPC64WordRotateMask(m)
// result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
for {
m := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64ROTLW {
break
}
r := v_0.Args[1]
x := v_0.Args[0]
if !(isPPC64WordRotateMask(m)) {
break
}
v.reset(OpPPC64RLWNM)
v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
v.AddArg2(x, r)
return true
}
// match: (ANDconst [m] (SRWconst x [s]))
// cond: mergePPC64RShiftMask(m,s,32) == 0
// result: (MOVDconst [0])
for {
m := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64SRWconst {
break
}
s := auxIntToInt64(v_0.AuxInt)
if !(mergePPC64RShiftMask(m, s, 32) == 0) {
break
}
v.reset(OpPPC64MOVDconst)
v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (ANDconst [m] (SRWconst x [s]))
// cond: mergePPC64AndSrwi(m,s) != 0
// result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
for {
m := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64SRWconst {
break
}
s := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(mergePPC64AndSrwi(m, s) != 0) {
break
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
v.AddArg(x)
return true
}
// match: (ANDconst [c] (ANDconst [d] x))
// result: (ANDconst [c&d] x)
for {
@@ -4505,6 +4681,47 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
}
return false
}
func rewriteValuePPC64_OpPPC64CLRLSLDI(v *Value) bool {
v_0 := v.Args[0]
// match: (CLRLSLDI [c] (SRWconst [s] x))
// cond: mergePPC64ClrlsldiSrw(int64(c),s) != 0
// result: (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
for {
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpPPC64SRWconst {
break
}
s := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(mergePPC64ClrlsldiSrw(int64(c), s) != 0) {
break
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiSrw(int64(c), s))
v.AddArg(x)
return true
}
// match: (CLRLSLDI [c] i:(RLWINM [s] x))
// cond: mergePPC64ClrlsldiRlwinm(c,s) != 0
// result: (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
for {
c := auxIntToInt32(v.AuxInt)
i := v_0
if i.Op != OpPPC64RLWINM {
break
}
s := auxIntToInt64(i.AuxInt)
x := i.Args[0]
if !(mergePPC64ClrlsldiRlwinm(c, s) != 0) {
break
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiRlwinm(c, s))
v.AddArg(x)
return true
}
return false
}
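// CLRLSLDI clears the leftmost bits of its operand and then shifts left;
// the merges above fold a preceding 32-bit shift or RLWINM into a single
// rotate-and-mask when the merge helpers can encode the composition (they
// return 0 when they cannot, 0 never being a valid encoding). Reference
// semantics, assuming b leading bits cleared and a left shift by n (how
// the AuxInt c packs b and n is left to the encode helpers):
func clrlsldi(x uint64, b, n uint) uint64 {
	return (x << b >> b) << n // clear high b bits, then shift left by n
}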
func rewriteValuePPC64_OpPPC64CMP(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -5213,7 +5430,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
}
// match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -5230,7 +5447,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
}
v.reset(OpPPC64FMOVDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -5302,7 +5519,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
}
// match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -5320,7 +5537,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
}
v.reset(OpPPC64FMOVDstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -5331,7 +5548,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
v_0 := v.Args[0]
// match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -5348,7 +5565,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
}
v.reset(OpPPC64FMOVSload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -5403,7 +5620,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
}
// match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -5421,7 +5638,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
}
v.reset(OpPPC64FMOVSstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -6389,7 +6606,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
v_0 := v.Args[0]
// match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -6406,7 +6623,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
}
v.reset(OpPPC64MOVBZload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -7141,7 +7358,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -7159,7 +7376,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
}
v.reset(OpPPC64MOVBstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -8114,7 +8331,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
}
// match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
// result: (MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8131,7 +8348,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
}
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, mem)
return true
}
@@ -8159,7 +8376,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
}
// match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8176,7 +8393,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
}
v.reset(OpPPC64MOVDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -8312,7 +8529,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
}
// match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8330,7 +8547,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
}
v.reset(OpPPC64MOVDstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -8446,7 +8663,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
}
// match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8463,7 +8680,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, mem)
return true
}
@@ -8540,7 +8757,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
v_0 := v.Args[0]
// match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8557,7 +8774,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
}
v.reset(OpPPC64MOVHZload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -9062,7 +9279,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
v_0 := v.Args[0]
// match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9079,7 +9296,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
}
v.reset(OpPPC64MOVHload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -9456,7 +9673,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
}
// match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9474,7 +9691,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
}
v.reset(OpPPC64MOVHstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -9772,7 +9989,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
}
// match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
// result: (MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9789,7 +10006,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, mem)
return true
}
@@ -9836,7 +10053,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
v_0 := v.Args[0]
// match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9853,7 +10070,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
}
v.reset(OpPPC64MOVWZload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -10365,7 +10582,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
v_0 := v.Args[0]
// match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -10382,7 +10599,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
}
v.reset(OpPPC64MOVWload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -10778,7 +10995,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
}
// match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -10796,7 +11013,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
}
v.reset(OpPPC64MOVWstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
@@ -10974,7 +11191,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
}
// match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
// result: (MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -10991,7 +11208,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
}
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, mem)
return true
}
@@ -12844,6 +13061,55 @@ func rewriteValuePPC64_OpPPC64ROTLW(v *Value) bool {
}
return false
}
func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool {
v_0 := v.Args[0]
// match: (ROTLWconst [r] (AND (MOVDconst [m]) x))
// cond: isPPC64WordRotateMask(m)
// result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
for {
r := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64AND {
break
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
if v_0_0.Op != OpPPC64MOVDconst {
continue
}
m := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
if !(isPPC64WordRotateMask(m)) {
continue
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
v.AddArg(x)
return true
}
break
}
// match: (ROTLWconst [r] (ANDconst [m] x))
// cond: isPPC64WordRotateMask(m)
// result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
for {
r := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64ANDconst {
break
}
m := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isPPC64WordRotateMask(m)) {
break
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
v.AddArg(x)
return true
}
return false
}
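Both ROTLWconst rules rest on rotation distributing over AND: rotating x&m by r equals rotating x and m separately and ANDing the results, so the rotate folds into one RLWINM whose mask is rotateLeft32(m,r). A quick check of the identity:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x, m := uint32(0x12345678), uint32(0x0000FFFF)
	r := 8
	fmt.Printf("%#x\n", bits.RotateLeft32(x&m, r))                       // 0x567800
	fmt.Printf("%#x\n", bits.RotateLeft32(x, r)&bits.RotateLeft32(m, r)) // 0x567800
}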
func rewriteValuePPC64_OpPPC64SLD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -12864,6 +13130,24 @@ func rewriteValuePPC64_OpPPC64SLD(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SLDconst [l] (SRWconst [r] x))
// cond: mergePPC64SldiSrw(l,r) != 0
// result: (RLWINM [mergePPC64SldiSrw(l,r)] x)
for {
l := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64SRWconst {
break
}
r := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(mergePPC64SldiSrw(l, r) != 0) {
break
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(mergePPC64SldiSrw(l, r))
v.AddArg(x)
return true
}
// match: (SLDconst [c] z:(MOVBZreg x))
// cond: c < 8 && z.Uses == 1
// result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
@@ -13180,6 +13464,96 @@ func rewriteValuePPC64_OpPPC64SRW(v *Value) bool {
}
return false
}
func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SRWconst (ANDconst [m] x) [s])
// cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
// result: (MOVDconst [0])
for {
s := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64ANDconst {
break
}
m := auxIntToInt64(v_0.AuxInt)
if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
break
}
v.reset(OpPPC64MOVDconst)
v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (SRWconst (ANDconst [m] x) [s])
// cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
// result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
for {
s := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64ANDconst {
break
}
m := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
break
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
v.AddArg(x)
return true
}
// match: (SRWconst (AND (MOVDconst [m]) x) [s])
// cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
// result: (MOVDconst [0])
for {
s := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64AND {
break
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
if v_0_0.Op != OpPPC64MOVDconst {
continue
}
m := auxIntToInt64(v_0_0.AuxInt)
if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
continue
}
v.reset(OpPPC64MOVDconst)
v.AuxInt = int64ToAuxInt(0)
return true
}
break
}
// match: (SRWconst (AND (MOVDconst [m]) x) [s])
// cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
// result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
for {
s := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64AND {
break
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
if v_0_0.Op != OpPPC64MOVDconst {
continue
}
m := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
continue
}
v.reset(OpPPC64RLWINM)
v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
v.AddArg(x)
return true
}
break
}
return false
}
func rewriteValuePPC64_OpPPC64SUB(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]

File diff suppressed because it is too large


@@ -49,6 +49,9 @@ func rewriteValueS390X(v *Value) bool {
return rewriteValueS390X_OpAtomicAdd32(v)
case OpAtomicAdd64:
return rewriteValueS390X_OpAtomicAdd64(v)
case OpAtomicAnd32:
v.Op = OpS390XLAN
return true
case OpAtomicAnd8:
return rewriteValueS390X_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
@@ -69,6 +72,9 @@ func rewriteValueS390X(v *Value) bool {
return rewriteValueS390X_OpAtomicLoadAcq32(v)
case OpAtomicLoadPtr:
return rewriteValueS390X_OpAtomicLoadPtr(v)
case OpAtomicOr32:
v.Op = OpS390XLAO
return true
case OpAtomicOr8:
return rewriteValueS390X_OpAtomicOr8(v)
case OpAtomicStore32:
@@ -5629,7 +5635,7 @@ func rewriteValueS390X_OpS390XADDWload(v *Value) bool {
}
// match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (ADDWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -5646,7 +5652,7 @@ func rewriteValueS390X_OpS390XADDWload(v *Value) bool {
}
v.reset(OpS390XADDWload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -5809,7 +5815,7 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool {
}
// match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (ADDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -5826,7 +5832,7 @@ func rewriteValueS390X_OpS390XADDload(v *Value) bool {
}
v.reset(OpS390XADDload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -6185,7 +6191,7 @@ func rewriteValueS390X_OpS390XANDWload(v *Value) bool {
}
// match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (ANDWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -6202,7 +6208,7 @@ func rewriteValueS390X_OpS390XANDWload(v *Value) bool {
}
v.reset(OpS390XANDWload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -6310,7 +6316,7 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool {
}
// match: (ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (ANDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -6327,7 +6333,7 @@ func rewriteValueS390X_OpS390XANDload(v *Value) bool {
}
v.reset(OpS390XANDload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -7433,7 +7439,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool {
}
// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -7449,7 +7455,7 @@ func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool {
}
v.reset(OpS390XFMOVDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -7483,7 +7489,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool {
}
// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -7500,7 +7506,7 @@ func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool {
}
v.reset(OpS390XFMOVDstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -7550,7 +7556,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool {
}
// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -7566,7 +7572,7 @@ func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool {
}
v.reset(OpS390XFMOVSload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -7600,7 +7606,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool {
}
// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -7617,7 +7623,7 @@ func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool {
}
v.reset(OpS390XFMOVSstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -8095,7 +8101,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool {
}
// match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8111,7 +8117,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool {
}
v.reset(OpS390XMOVBZload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -8365,7 +8371,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool {
}
// match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8381,7 +8387,7 @@ func rewriteValueS390X_OpS390XMOVBload(v *Value) bool {
}
v.reset(OpS390XMOVBload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -8652,7 +8658,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool {
}
// match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8669,7 +8675,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool {
}
v.reset(OpS390XMOVBstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -8925,7 +8931,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool {
}
// match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
// cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
// result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
// result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
for {
sc := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -8941,7 +8947,7 @@ func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool {
}
v.reset(OpS390XMOVBstoreconst)
v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -9019,7 +9025,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool {
}
// match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (MOVDaddridx [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9035,13 +9041,13 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool {
}
v.reset(OpS390XMOVDaddridx)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
// match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB
// result: (MOVDaddridx [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
// result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9057,7 +9063,7 @@ func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool {
}
v.reset(OpS390XMOVDaddridx)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(x, y)
return true
}
@@ -9126,7 +9132,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool {
}
// match: (MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))
// result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9143,7 +9149,7 @@ func rewriteValueS390X_OpS390XMOVDload(v *Value) bool {
}
v.reset(OpS390XMOVDload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -9198,7 +9204,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
}
// match: (MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))
// result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9216,7 +9222,7 @@ func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
}
v.reset(OpS390XMOVDstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -9329,7 +9335,7 @@ func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool {
}
// match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
// cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
// result: (MOVDstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
// result: (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
for {
sc := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9345,7 +9351,7 @@ func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool {
}
v.reset(OpS390XMOVDstoreconst)
v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -9512,7 +9518,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool {
}
// match: (MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
// result: (MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9529,7 +9535,7 @@ func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool {
}
v.reset(OpS390XMOVHZload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -9753,7 +9759,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool {
}
// match: (MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
// result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -9770,7 +9776,7 @@ func rewriteValueS390X_OpS390XMOVHload(v *Value) bool {
}
v.reset(OpS390XMOVHload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -10047,7 +10053,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool {
}
// match: (MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
// result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -10065,7 +10071,7 @@ func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool {
}
v.reset(OpS390XMOVHstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -10213,7 +10219,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool {
}
// match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
// cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
// result: (MOVHstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
// result: (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
for {
sc := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -10229,7 +10235,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool {
}
v.reset(OpS390XMOVHstoreconst)
v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -10368,7 +10374,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool {
}
// match: (MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
// result: (MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -10385,7 +10391,7 @@ func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool {
}
v.reset(OpS390XMOVWZload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -10588,7 +10594,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool {
}
// match: (MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
// result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -10605,7 +10611,7 @@ func rewriteValueS390X_OpS390XMOVWload(v *Value) bool {
}
v.reset(OpS390XMOVWload)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(base, mem)
return true
}
@@ -10868,7 +10874,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
}
// match: (MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
// result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -10886,7 +10892,7 @@ func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
}
v.reset(OpS390XMOVWstore)
v.AuxInt = int32ToAuxInt(off1 + off2)
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg3(base, val, mem)
return true
}
@@ -11057,7 +11063,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool {
}
// match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
// cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
// result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
// result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
for {
sc := auxIntToValAndOff(v.AuxInt)
sym1 := auxToSym(v.Aux)
@@ -11073,7 +11079,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool {
}
v.reset(OpS390XMOVWstoreconst)
v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.Aux = symToAux(mergeSym(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
@@ -11287,7 +11293,7 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool {
}
// match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (MULLDload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -11304,7 +11310,7 @@ func rewriteValueS390X_OpS390XMULLDload(v *Value) bool {
}
v.reset(OpS390XMULLDload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -11490,7 +11496,7 @@ func rewriteValueS390X_OpS390XMULLWload(v *Value) bool {
}
// match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (MULLWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -11507,7 +11513,7 @@ func rewriteValueS390X_OpS390XMULLWload(v *Value) bool {
}
v.reset(OpS390XMULLWload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -12840,7 +12846,7 @@ func rewriteValueS390X_OpS390XORWload(v *Value) bool {
}
// match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (ORWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -12857,7 +12863,7 @@ func rewriteValueS390X_OpS390XORWload(v *Value) bool {
}
v.reset(OpS390XORWload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -12951,7 +12957,7 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool {
}
// match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (ORload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -12968,7 +12974,7 @@ func rewriteValueS390X_OpS390XORload(v *Value) bool {
}
v.reset(OpS390XORload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -14327,7 +14333,7 @@ func rewriteValueS390X_OpS390XSUBWload(v *Value) bool {
}
// match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (SUBWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -14344,7 +14350,7 @@ func rewriteValueS390X_OpS390XSUBWload(v *Value) bool {
}
v.reset(OpS390XSUBWload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -14460,7 +14466,7 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool {
}
// match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (SUBload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -14477,7 +14483,7 @@ func rewriteValueS390X_OpS390XSUBload(v *Value) bool {
}
v.reset(OpS390XSUBload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -14806,7 +14812,7 @@ func rewriteValueS390X_OpS390XXORWload(v *Value) bool {
}
// match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (XORWload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -14823,7 +14829,7 @@ func rewriteValueS390X_OpS390XXORWload(v *Value) bool {
}
v.reset(OpS390XXORWload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}
@@ -14907,7 +14913,7 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool {
}
// match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
// cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
// result: (XORload [o1+o2] {mergeSymTyped(s1, s2)} x ptr mem)
// result: (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
for {
o1 := auxIntToInt32(v.AuxInt)
s1 := auxToSym(v.Aux)
@@ -14924,7 +14930,7 @@ func rewriteValueS390X_OpS390XXORload(v *Value) bool {
}
v.reset(OpS390XXORload)
v.AuxInt = int32ToAuxInt(o1 + o2)
v.Aux = symToAux(mergeSymTyped(s1, s2))
v.Aux = symToAux(mergeSym(s1, s2))
v.AddArg3(x, ptr, mem)
return true
}


@@ -36,3 +36,184 @@ func TestSubFlags(t *testing.T) {
t.Errorf("subFlags32(0,1).ult() returned false")
}
}
func TestIsPPC64WordRotateMask(t *testing.T) {
tests := []struct {
input int64
expected bool
}{
{0x00000001, true},
{0x80000001, true},
{0x80010001, false},
{0xFFFFFFFA, false},
{0xF0F0F0F0, false},
{0xFFFFFFFD, true},
{0x80000000, true},
{0x00000000, false},
{0xFFFFFFFF, true},
{0x0000FFFF, true},
{0xFF0000FF, true},
{0x00FFFF00, true},
}
for _, v := range tests {
if v.expected != isPPC64WordRotateMask(v.input) {
t.Errorf("isPPC64WordRotateMask(0x%x) failed", v.input)
}
}
}
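The vectors above characterize a word rotate mask: a single contiguous run of ones in 32 bits, where the run may wrap around from bit 0 back to bit 31 (hence 0xFF0000FF is valid but 0x80010001 is not). A re-derived sketch of that predicate — the compiler's own check lives in its rewrite helpers — reproduces every vector:

package main

import "fmt"

// isWordRotateMask reports whether m is one contiguous run of set bits,
// allowing the run to wrap around the word boundary.
func isWordRotateMask(m uint32) bool {
	if m == 0 {
		return false
	}
	// x is a single non-wrapping run iff adding its lowest set bit
	// carries past the run, leaving no overlap with x.
	isRun := func(x uint32) bool {
		return x != 0 && (uint64(x)+uint64(x&-x))&uint64(x) == 0
	}
	// Either m itself is a run, or its complement is (the wrapping case).
	return isRun(m) || isRun(^m)
}

func main() {
	fmt.Println(isWordRotateMask(0xFF0000FF)) // true: the run wraps
	fmt.Println(isWordRotateMask(0x80010001)) // false: two separate runs
}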
func TestEncodeDecodePPC64WordRotateMask(t *testing.T) {
tests := []struct {
rotate int64
mask uint64
nbits,
mb,
me,
encoded int64
}{
{1, 0x00000001, 32, 31, 31, 0x20011f20},
{2, 0x80000001, 32, 31, 0, 0x20021f01},
{3, 0xFFFFFFFD, 32, 31, 29, 0x20031f1e},
{4, 0x80000000, 32, 0, 0, 0x20040001},
{5, 0xFFFFFFFF, 32, 0, 31, 0x20050020},
{6, 0x0000FFFF, 32, 16, 31, 0x20061020},
{7, 0xFF0000FF, 32, 24, 7, 0x20071808},
{8, 0x00FFFF00, 32, 8, 23, 0x20080818},
{9, 0x0000000000FFFF00, 64, 40, 55, 0x40092838},
{10, 0xFFFF000000000000, 64, 0, 15, 0x400A0010},
{10, 0xFFFF000000000001, 64, 63, 15, 0x400A3f10},
}
for i, v := range tests {
result := encodePPC64RotateMask(v.rotate, int64(v.mask), v.nbits)
if result != v.encoded {
t.Errorf("encodePPC64RotateMask(%d,0x%x,%d) = 0x%x, expected 0x%x", v.rotate, v.mask, v.nbits, result, v.encoded)
}
rotate, mb, me, mask := DecodePPC64RotateMask(result)
if rotate != v.rotate || mb != v.mb || me != v.me || mask != v.mask {
t.Errorf("DecodePPC64Failure(Test %d) got (%d, %d, %d, %x) expected (%d, %d, %d, %x)", i, rotate, mb, me, mask, v.rotate, v.mb, v.me, v.mask)
}
}
}
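The encoded values suggest a byte-per-field auxint layout: nbits, rotate, mask begin, and mask end plus one, with mb/me as big-endian bit indices. A decode sketch under that inferred layout (not the compiler's own DecodePPC64RotateMask):

package main

import "fmt"

// decodeRotateMask unpacks the layout implied by the vectors above.
func decodeRotateMask(enc int64) (nbits, rotate, mb, me int64) {
	nbits = enc >> 24 & 0xFF
	rotate = enc >> 16 & 0xFF
	mb = enc >> 8 & 0xFF
	me = enc&0xFF - 1
	return
}

func main() {
	// 0x20011f20 encodes rotate 1 of mask 0x00000001 over 32 bits.
	fmt.Println(decodeRotateMask(0x20011f20)) // 32 1 31 31
}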
func TestMergePPC64ClrlsldiSrw(t *testing.T) {
tests := []struct {
clrlsldi int32
srw int64
valid bool
rotate int64
mask uint64
}{
// ((x>>4)&0xFF)<<4
{newPPC64ShiftAuxInt(4, 56, 63, 64), 4, true, 0, 0xFF0},
// ((x>>4)&0xFFFF)<<4
{newPPC64ShiftAuxInt(4, 48, 63, 64), 4, true, 0, 0xFFFF0},
// ((x>>4)&0xFFFF)<<17
{newPPC64ShiftAuxInt(17, 48, 63, 64), 4, false, 0, 0},
// ((x>>4)&0xFFFF)<<16
{newPPC64ShiftAuxInt(16, 48, 63, 64), 4, true, 12, 0xFFFF0000},
// ((x>>32)&0xFFFF)<<17
{newPPC64ShiftAuxInt(17, 48, 63, 64), 32, false, 0, 0},
}
for i, v := range tests {
result := mergePPC64ClrlsldiSrw(int64(v.clrlsldi), v.srw)
if v.valid && result == 0 {
t.Errorf("mergePPC64ClrlsldiSrw(Test %d) did not merge", i)
} else if !v.valid && result != 0 {
t.Errorf("mergePPC64ClrlsldiSrw(Test %d) should return 0", i)
} else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
t.Errorf("mergePPC64ClrlsldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
}
}
}
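Worked form of the first vector: ((x>>4)&0xFF)<<4 keeps bits 4..11 of x in place, which is the same as masking with 0xFF0 and needs no rotate — hence the merged RLWINM has rotate 0 and mask 0xFF0:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint32(0xDEADBEEF)
	fmt.Printf("%#x\n", ((x>>4)&0xFF)<<4)              // 0xee0
	fmt.Printf("%#x\n", bits.RotateLeft32(x, 0)&0xFF0) // 0xee0
}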
func TestMergePPC64ClrlsldiRlwinm(t *testing.T) {
tests := []struct {
clrlsldi int32
rlwinm int64
valid bool
rotate int64
mask uint64
}{
// ((x<<4)&0xFF00)<<4
{newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(4, 0xFF00, 32), false, 0, 0},
// ((x>>4)&0xFF)<<4
{newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(28, 0x0FFFFFFF, 32), true, 0, 0xFF0},
// ((x>>4)&0xFFFF)<<4
{newPPC64ShiftAuxInt(4, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 0, 0xFFFF0},
// ((x>>4)&0xFFFF)<<17
{newPPC64ShiftAuxInt(17, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), false, 0, 0},
// ((x>>4)&0xFFFF)<<16
{newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 12, 0xFFFF0000},
// ((x>>4)&0xF000FFFF)<<16
{newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xF000FFFF, 32), true, 12, 0xFFFF0000},
}
for i, v := range tests {
result := mergePPC64ClrlsldiRlwinm(v.clrlsldi, v.rlwinm)
if v.valid && result == 0 {
t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) did not merge", i)
} else if !v.valid && result != 0 {
t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) should return 0", i)
} else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
}
}
}
func TestMergePPC64SldiSrw(t *testing.T) {
tests := []struct {
sld int64
srw int64
valid bool
rotate int64
mask uint64
}{
{4, 4, true, 0, 0xFFFFFFF0},
{4, 8, true, 28, 0x0FFFFFF0},
{0, 0, true, 0, 0xFFFFFFFF},
{8, 4, false, 0, 0},
{0, 32, false, 0, 0},
{0, 31, true, 1, 0x1},
{31, 31, true, 0, 0x80000000},
{32, 32, false, 0, 0},
}
for i, v := range tests {
result := mergePPC64SldiSrw(v.sld, v.srw)
if v.valid && result == 0 {
t.Errorf("mergePPC64SldiSrw(Test %d) did not merge", i)
} else if !v.valid && result != 0 {
t.Errorf("mergePPC64SldiSrw(Test %d) should return 0", i)
} else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
t.Errorf("mergePPC64SldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
}
}
}
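Worked form of the second vector: (x>>8)<<4 lands bits 8..31 of x at positions 4..27, which a single rotate left by 28 masked with 0x0FFFFFF0 reproduces:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint32(0xCAFEBABE)
	fmt.Printf("%#x\n", (x>>8)<<4)                            // 0xcafeba0
	fmt.Printf("%#x\n", bits.RotateLeft32(x, 28)&0x0FFFFFF0) // 0xcafeba0
}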
func TestMergePPC64AndSrwi(t *testing.T) {
tests := []struct {
and int64
srw int64
valid bool
rotate int64
mask uint64
}{
{0x000000FF, 8, true, 24, 0xFF},
{0xF00000FF, 8, true, 24, 0xFF},
{0x0F0000FF, 4, false, 0, 0},
{0x00000000, 4, false, 0, 0},
{0xF0000000, 4, false, 0, 0},
{0xF0000000, 32, false, 0, 0},
}
for i, v := range tests {
result := mergePPC64AndSrwi(v.and, v.srw)
if v.valid && result == 0 {
t.Errorf("mergePPC64AndSrwi(Test %d) did not merge", i)
} else if !v.valid && result != 0 {
t.Errorf("mergePPC64AndSrwi(Test %d) should return 0", i)
} else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
t.Errorf("mergePPC64AndSrwi(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
}
}
}
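Worked form of the first vector: (x>>8)&0xFF equals a rotate left by 24 (that is, 32-8) masked with 0xFF; the mask discards the bits the rotate wrapped in, which is also why the high bits of an AND constant like 0xF00000FF are irrelevant:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint32(0x12345678)
	fmt.Printf("%#x\n", (x>>8)&0xFF)                   // 0x56
	fmt.Printf("%#x\n", bits.RotateLeft32(x, 24)&0xFF) // 0x56
}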

File diff suppressed because it is too large


@@ -394,6 +394,8 @@ func rewriteValuegeneric(v *Value) bool {
return rewriteValuegeneric_OpSqrt(v)
case OpStaticCall:
return rewriteValuegeneric_OpStaticCall(v)
case OpStaticLECall:
return rewriteValuegeneric_OpStaticLECall(v)
case OpStore:
return rewriteValuegeneric_OpStore(v)
case OpStringLen:
@@ -5208,6 +5210,66 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool {
return true
}
// match: (Div64u x (Const64 [c]))
// cond: c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul
// result: (Add64 (Add64 <typ.UInt64> (Add64 <typ.UInt64> (Lsh64x64 <typ.UInt64> (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)]))) (Const64 <typ.UInt64> [32])) (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)])))) (Mul64 <typ.UInt64> (ZeroExt32to64 <typ.UInt64> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)]))) (Const64 <typ.UInt64> [int64((1<<32)/c)]))) (ZeroExt32to64 (Div32u <typ.UInt32> (Add32 <typ.UInt32> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)])) (Mul32 <typ.UInt32> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)])) (Const32 <typ.UInt32> [int32((1<<32)%c)]))) (Const32 <typ.UInt32> [int32(c)]))))
for {
x := v_0
if v_1.Op != OpConst64 {
break
}
c := auxIntToInt64(v_1.AuxInt)
if !(c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul) {
break
}
v.reset(OpAdd64)
v0 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64)
v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
v5 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
v6 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v7.AuxInt = int64ToAuxInt(32)
v6.AddArg2(x, v7)
v5.AddArg(v6)
v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v8.AuxInt = int32ToAuxInt(int32(c))
v4.AddArg2(v5, v8)
v3.AddArg(v4)
v2.AddArg2(v3, v7)
v9 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v10 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
v11 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
v11.AddArg(x)
v10.AddArg2(v11, v8)
v9.AddArg(v10)
v1.AddArg2(v2, v9)
v12 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
v13 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v14 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
v14.AddArg2(v5, v8)
v13.AddArg(v14)
v15 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v15.AuxInt = int64ToAuxInt(int64((1 << 32) / c))
v12.AddArg2(v13, v15)
v0.AddArg2(v1, v12)
v16 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v17 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
v18 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
v19 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
v19.AddArg2(v11, v8)
v20 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
v21 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v21.AuxInt = int32ToAuxInt(int32((1 << 32) % c))
v20.AddArg2(v14, v21)
v18.AddArg2(v19, v20)
v17.AddArg2(v18, v8)
v16.AddArg(v17)
v.AddArg2(v0, v16)
return true
}
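The rule above divides n = h*2^32 + l by a small constant using only 32-bit operations. Since (h%c)*2^32 = c*(h%c)*floor(2^32/c) + (h%c)*(2^32 mod c), the quotient splits into (h/c)*2^32 + l/c + (h%c)*floor(2^32/c) plus one fix-up division, and c <= 0xFFFF keeps the fix-up dividend within 32 bits. A sketch of the same decomposition (assuming 1 < c <= 0xFFFF and c not a power of two, per the umagicOK32 guard):

package main

import "fmt"

// div64u mirrors the Div64u decomposition: only 32-bit divides and
// remainders of the high/low words, plus one fix-up divide whose
// dividend l%c + (h%c)*(2^32%c) stays below 2^32 because c <= 0xFFFF.
func div64u(n uint64, c uint32) uint64 {
	h, l := uint32(n>>32), uint32(n)
	q32 := uint32((1 << 32) / uint64(c)) // 2^32 / c
	r32 := uint32((1 << 32) % uint64(c)) // 2^32 % c
	q := uint64(h/c)<<32 + uint64(l/c) + uint64(h%c)*uint64(q32)
	return q + uint64((l%c+(h%c)*r32)/c)
}

func main() {
	n, c := uint64(1)<<32+5, uint32(3)
	fmt.Println(div64u(n, c), n/uint64(c)) // 1431655767 1431655767
}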
// match: (Div64u x (Const64 [c]))
// cond: umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul
// result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)]) x) (Const64 <typ.UInt64> [umagic64(c).s-1]))
for {
@@ -20707,6 +20769,36 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
// match: (SelectN [0] (MakeResult a ___))
// result: a
for {
if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpMakeResult || len(v_0.Args) < 1 {
break
}
a := v_0.Args[0]
v.copyOf(a)
return true
}
// match: (SelectN [1] (MakeResult a b ___))
// result: b
for {
if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpMakeResult || len(v_0.Args) < 2 {
break
}
b := v_0.Args[1]
v.copyOf(b)
return true
}
// match: (SelectN [2] (MakeResult a b c ___))
// result: c
for {
if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpMakeResult || len(v_0.Args) < 3 {
break
}
c := v_0.Args[2]
v.copyOf(c)
return true
}
// match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem))
// cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
// result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
@@ -21307,6 +21399,44 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool {
}
return false
}
func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
// cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon)
// result: (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
for {
if len(v.Args) != 4 {
break
}
callAux := auxToCall(v.Aux)
mem := v.Args[3]
sptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAddr {
break
}
scon := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
break
}
v_2 := v.Args[2]
if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 1 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon)) {
break
}
v.reset(OpMakeResult)
v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
v1.AddArg2(sptr, mem)
v2 := b.NewValue0(v.Pos, OpConst8, typ.Int8)
v2.AuxInt = int8ToAuxInt(int8(read8(scon, 0)))
v0.AddArg2(v1, v2)
v.AddArg2(v0, mem)
return true
}
return false
}
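The StaticLECall rule above folds a one-byte runtime.memequal against read-only data into a single byte load and 8-bit compare. Schematically (memequal1 is an illustrative stand-in for the rewritten shape, not a runtime function):

package main

import "fmt"

// memequal1 models the folded form of runtime.memequal(p, q, 1) when q
// points at read-only data whose first byte is known at compile time.
func memequal1(p *byte, konst byte) bool {
	return *p == konst
}

func main() {
	b := byte('x')
	fmt.Println(memequal1(&b, 'x')) // true
}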
func rewriteValuegeneric_OpStore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]


@@ -153,6 +153,9 @@ func (s *stackAllocState) stackalloc() {
if v.Op != OpArg {
continue
}
if v.Aux == nil {
f.Fatalf("%s has nil Aux\n", v.LongString())
}
loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt}
if f.pass.debug > stackDebug {
fmt.Printf("stackalloc %s to %s\n", v, loc)


@@ -44,10 +44,85 @@ func BenchmarkDivisibleWDivconstI64(b *testing.B) {
var u64res uint64
func BenchmarkDivconstU64(b *testing.B) {
for i := 0; i < b.N; i++ {
u64res = uint64(i) / 7
func TestDivmodConstU64(t *testing.T) {
// Test division by c. Function f must be func(n) { return n/c, n%c }
testdiv := func(c uint64, f func(uint64) (uint64, uint64)) func(*testing.T) {
return func(t *testing.T) {
x := uint64(12345)
for i := 0; i < 10000; i++ {
x += x << 2
q, r := f(x)
if r < 0 || r >= c || q*c+r != x {
t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r)
}
}
max := uint64(1<<64-1) / c * c
xs := []uint64{0, 1, c - 1, c, c + 1, 2*c - 1, 2 * c, 2*c + 1,
c*c - 1, c * c, c*c + 1, max - 1, max, max + 1, 1<<64 - 1}
for _, x := range xs {
q, r := f(x)
if r < 0 || r >= c || q*c+r != x {
t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r)
}
}
}
}
t.Run("2", testdiv(2, func(n uint64) (uint64, uint64) { return n / 2, n % 2 }))
t.Run("3", testdiv(3, func(n uint64) (uint64, uint64) { return n / 3, n % 3 }))
t.Run("4", testdiv(4, func(n uint64) (uint64, uint64) { return n / 4, n % 4 }))
t.Run("5", testdiv(5, func(n uint64) (uint64, uint64) { return n / 5, n % 5 }))
t.Run("6", testdiv(6, func(n uint64) (uint64, uint64) { return n / 6, n % 6 }))
t.Run("7", testdiv(7, func(n uint64) (uint64, uint64) { return n / 7, n % 7 }))
t.Run("8", testdiv(8, func(n uint64) (uint64, uint64) { return n / 8, n % 8 }))
t.Run("9", testdiv(9, func(n uint64) (uint64, uint64) { return n / 9, n % 9 }))
t.Run("10", testdiv(10, func(n uint64) (uint64, uint64) { return n / 10, n % 10 }))
t.Run("11", testdiv(11, func(n uint64) (uint64, uint64) { return n / 11, n % 11 }))
t.Run("12", testdiv(12, func(n uint64) (uint64, uint64) { return n / 12, n % 12 }))
t.Run("13", testdiv(13, func(n uint64) (uint64, uint64) { return n / 13, n % 13 }))
t.Run("14", testdiv(14, func(n uint64) (uint64, uint64) { return n / 14, n % 14 }))
t.Run("15", testdiv(15, func(n uint64) (uint64, uint64) { return n / 15, n % 15 }))
t.Run("16", testdiv(16, func(n uint64) (uint64, uint64) { return n / 16, n % 16 }))
t.Run("17", testdiv(17, func(n uint64) (uint64, uint64) { return n / 17, n % 17 }))
t.Run("255", testdiv(255, func(n uint64) (uint64, uint64) { return n / 255, n % 255 }))
t.Run("256", testdiv(256, func(n uint64) (uint64, uint64) { return n / 256, n % 256 }))
t.Run("257", testdiv(257, func(n uint64) (uint64, uint64) { return n / 257, n % 257 }))
t.Run("65535", testdiv(65535, func(n uint64) (uint64, uint64) { return n / 65535, n % 65535 }))
t.Run("65536", testdiv(65536, func(n uint64) (uint64, uint64) { return n / 65536, n % 65536 }))
t.Run("65537", testdiv(65537, func(n uint64) (uint64, uint64) { return n / 65537, n % 65537 }))
t.Run("1<<32-1", testdiv(1<<32-1, func(n uint64) (uint64, uint64) { return n / (1<<32 - 1), n % (1<<32 - 1) }))
t.Run("1<<32+1", testdiv(1<<32+1, func(n uint64) (uint64, uint64) { return n / (1<<32 + 1), n % (1<<32 + 1) }))
t.Run("1<<64-1", testdiv(1<<64-1, func(n uint64) (uint64, uint64) { return n / (1<<64 - 1), n % (1<<64 - 1) }))
}
func BenchmarkDivconstU64(b *testing.B) {
b.Run("3", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
u64res = uint64(x) / 3
}
})
b.Run("5", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
u64res = uint64(x) / 5
}
})
b.Run("37", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
u64res = uint64(x) / 37
}
})
b.Run("1234567", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
u64res = uint64(x) / 1234567
}
})
}
func BenchmarkModconstU64(b *testing.B) {


@@ -42,10 +42,11 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
// loadByType returns the load instruction of the given type.
func loadByType(t *types.Type) obj.As {
// Avoid partial register write
if !t.IsFloat() && t.Size() <= 2 {
if t.Size() == 1 {
if !t.IsFloat() {
switch t.Size() {
case 1:
return x86.AMOVBLZX
} else {
case 2:
return x86.AMOVWLZX
}
}


@@ -1549,7 +1549,7 @@ var cgoEnabled = map[string]bool{
"linux/mipsle": true,
"linux/mips64": true,
"linux/mips64le": true,
"linux/riscv64": false, // Issue 36641
"linux/riscv64": true,
"linux/s390x": true,
"linux/sparc64": true,
"android/386": true,
@@ -1567,6 +1567,7 @@ var cgoEnabled = map[string]bool{
"openbsd/amd64": true,
"openbsd/arm": true,
"openbsd/arm64": true,
"openbsd/mips64": false,
"plan9/386": false,
"plan9/amd64": false,
"plan9/arm": false,
@@ -1580,6 +1581,7 @@ var cgoEnabled = map[string]bool{
// filtered out of cgoEnabled for 'dist list'. See golang.org/issue/28944
var incomplete = map[string]bool{
"linux/sparc64": true,
"openbsd/mips64": true,
}
func needCC() bool {

src/cmd/dist/test.go

@@ -217,6 +217,9 @@ func (t *tester) run() {
fmt.Println("\nFAILED")
xexit(1)
} else if incomplete[goos+"/"+goarch] {
// The test succeeded, but consider it as failed so we don't
// forget to remove the port from the incomplete map once the
// port is complete.
fmt.Println("\nFAILED (incomplete port)")
xexit(1)
} else if t.partial {
@@ -463,13 +466,14 @@ func (t *tester) registerTests() {
})
}
// Test the ios build tag on darwin/amd64 for the iOS simulator.
if goos == "darwin" && goarch == "amd64" {
// Test ios/amd64 for the iOS simulator.
if goos == "darwin" && goarch == "amd64" && t.cgoEnabled {
t.tests = append(t.tests, distTest{
name: "amd64ios",
heading: "ios tag on darwin/amd64",
heading: "GOOS=ios on darwin/amd64",
fn: func(dt *distTest) error {
t.addCmd(dt, "src", t.goTest(), t.timeout(300), "-tags=ios", "-run=SystemRoots", "crypto/x509")
cmd := t.addCmd(dt, "src", t.goTest(), t.timeout(300), "-run=SystemRoots", "crypto/x509")
cmd.Env = append(os.Environ(), "GOOS=ios", "CGO_ENABLED=1")
return nil
},
})
@@ -949,7 +953,7 @@ func (t *tester) internalLink() bool {
// Internally linking cgo is incomplete on some architectures.
// https://golang.org/issue/10373
// https://golang.org/issue/14449
if goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" {
if goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" || goarch == "riscv64" {
return false
}
if goos == "aix" {
@@ -1078,7 +1082,12 @@ func (t *tester) cgoTest(dt *distTest) error {
cmd := t.addCmd(dt, "misc/cgo/test", t.goTest())
cmd.Env = append(os.Environ(), "GOFLAGS=-ldflags=-linkmode=auto")
if t.internalLink() {
// Skip internal linking cases on arm64 to support GCC-9.4 and above,
// only for linux, conservatively.
// See issue #39466.
skipInternalLink := goarch == "arm64" && goos == "linux"
if t.internalLink() && !skipInternalLink {
cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=internal")
cmd.Env = append(os.Environ(), "GOFLAGS=-ldflags=-linkmode=internal")
}
@@ -1108,8 +1117,8 @@ func (t *tester) cgoTest(dt *distTest) error {
"android-arm", "android-arm64",
"dragonfly-amd64",
"freebsd-386", "freebsd-amd64", "freebsd-arm",
"linux-386", "linux-amd64", "linux-arm", "linux-ppc64le", "linux-s390x",
"netbsd-386", "netbsd-amd64", "linux-arm64":
"linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-riscv64", "linux-s390x",
"netbsd-386", "netbsd-amd64":
cmd := t.addCmd(dt, "misc/cgo/test", t.goTest())
cmd.Env = append(os.Environ(), "GOFLAGS=-ldflags=-linkmode=external")
@@ -1154,7 +1163,7 @@ func (t *tester) cgoTest(dt *distTest) error {
if t.supportedBuildmode("pie") {
t.addCmd(dt, "misc/cgo/test", t.goTest(), "-buildmode=pie")
if t.internalLink() && t.internalLinkPIE() {
if t.internalLink() && t.internalLinkPIE() && !skipInternalLink {
t.addCmd(dt, "misc/cgo/test", t.goTest(), "-buildmode=pie", "-ldflags=-linkmode=internal", "-tags=internal,internal_pie")
}
t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-buildmode=pie")
@@ -1610,7 +1619,9 @@ func raceDetectorSupported(goos, goarch string) bool {
switch goos {
case "linux":
return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64"
case "darwin", "freebsd", "netbsd", "windows":
case "darwin":
return goarch == "amd64" || goarch == "arm64"
case "freebsd", "netbsd", "windows":
return goarch == "amd64"
default:
return false

Some files were not shown because too many files have changed in this diff