[dev.boringcrypto] all: merge master into dev.boringcrypto

Change-Id: I218ba1b89a2df6e4335c6a5846889d9a04affe5d
Filippo Valsorda 2018-10-15 17:09:34 -04:00
commit 623650b27a
545 changed files with 38459 additions and 24309 deletions


@ -3,6 +3,7 @@ pkg math/big, const MaxBase = 36
pkg math/big, type Word uintptr
pkg net, func ListenUnixgram(string, *UnixAddr) (*UDPConn, error)
pkg os, const ModeType = 2399141888
pkg os, const ModeType = 2399666176
pkg os (linux-arm), const O_SYNC = 4096
pkg os (linux-arm-cgo), const O_SYNC = 4096
pkg syscall (darwin-386), const ImplementsGetwd = false


@ -34,6 +34,7 @@ We encourage all Go users to subscribe to
<p>A <a href="/doc/devel/release.html">summary</a> of the changes between Go releases. Notes for the major releases:</p>
<ul>
<li><a href="/doc/go1.11">Go 1.11</a> <small>(August 2018)</small></li>
<li><a href="/doc/go1.10">Go 1.10</a> <small>(February 2018)</small></li>
<li><a href="/doc/go1.9">Go 1.9</a> <small>(August 2017)</small></li>
<li><a href="/doc/go1.8">Go 1.8</a> <small>(February 2017)</small></li>
@ -59,6 +60,15 @@ Go 1 matures.
<h3 id="source"><a href="https://golang.org/change">Source Code</a></h3>
<p>Check out the Go source code.</p>
<h3 id="discuss"><a href="//groups.google.com/group/golang-nuts">Discussion Mailing List</a></h3>
<p>
A mailing list for general discussion of Go programming.
</p>
<p>
Questions about using Go or announcements relevant to other Go users should be sent to
<a href="//groups.google.com/group/golang-nuts">golang-nuts</a>.
</p>
<h3 id="golang-dev"><a href="https://groups.google.com/group/golang-dev">Developer</a> and
<a href="https://groups.google.com/group/golang-codereviews">Code Review Mailing List</a></h3>
<p>The <a href="https://groups.google.com/group/golang-dev">golang-dev</a>
@ -66,9 +76,6 @@ mailing list is for discussing code changes to the Go project.
The <a href="https://groups.google.com/group/golang-codereviews">golang-codereviews</a>
mailing list is for actual reviewing of the code changes (CLs).</p>
<p>For general discussion of Go programming, see <a
href="https://groups.google.com/group/golang-nuts">golang-nuts</a>.</p>
<h3 id="golang-checkins"><a href="https://groups.google.com/group/golang-checkins">Checkins Mailing List</a></h3>
<p>A mailing list that receives a message summarizing each checkin to the Go repository.</p>
@ -116,7 +123,7 @@ To get started, read these <a href="/doc/contribute.html">contribution
guidelines</a> for information on design, testing, and our code review process.
</p>
<p>
Check <a href="//golang.org/issue">the tracker</a> for
open issues that interest you. Those labeled
<a href="https://github.com/golang/go/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22">help wanted</a>
are particularly in need of outside help.


@ -179,7 +179,15 @@ from it.</li>
<code>"fmt.Print"</code> as an unstructured literal with a <code>"."</code>
that needs to be quoted. It objects even more strongly to method names of
the form <code>pkg.(*MyType).Meth</code>.
<li>All global variables are lumped into package <code>"main"</code>.</li>
<li>As of Go 1.11, debug information is compressed by default.
Older versions of gdb, such as the one available by default on macOS,
do not understand the compression.
You can generate uncompressed debug information by using <code>go
build -ldflags=-compressdwarf=false</code>.
(For convenience you can put the <code>-ldflags</code> option in
the <a href="/cmd/go/#hdr-Environment_variables"><code>GOFLAGS</code>
environment variable</a> so that you don't have to specify it each time.)
</li>
</ol>
<h2 id="Tutorial">Tutorial</h2>


@ -30,6 +30,17 @@ Go 1.11 is a major release of Go.
Read the <a href="/doc/go1.11">Go 1.11 Release Notes</a> for more information.
</p>
<h3 id="go1.11.minor">Minor revisions</h3>
<p>
go1.11.1 (released 2018/10/01) includes fixes to the compiler, documentation, go
command, runtime, and the <code>crypto/x509</code>, <code>encoding/json</code>,
<code>go/types</code>, <code>net</code>, <code>net/http</code>, and
<code>reflect</code> packages.
See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.11.1">Go
1.11.1 milestone</a> on our issue tracker for details.
</p>
<h2 id="go1.10">go1.10 (released 2018/02/16)</h2>
<p>


@ -348,6 +348,20 @@ updating. See the <a href="go1.10.html#cgo">Go 1.10 release notes</a> for
details. <!-- CL 126275, CL 127156, CL 122217, CL 122575, CL 123177 -->
</p>
<h3 id="go_command">Go command</h3>
<p><!-- CL 126656 -->
The environment variable <code>GOFLAGS</code> may now be used
to set default flags for the <code>go</code> command.
This is useful in certain situations.
Linking can be noticeably slower on underpowered systems due to DWARF,
and users may want to set <code>-ldflags=-w</code> by default.
For modules, some users and CI systems will want vendoring always,
so they should set <code>-mod=vendor</code> by default.
For more information, see the <a href="/cmd/go/#hdr-Environment_variables"><code>go</code>
command documentation</a>.
</p>
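<p>
(An illustrative example, not part of this CL: putting
<code>GOFLAGS=-mod=vendor</code> in the environment makes every
<code>go build</code> and <code>go test</code> invocation behave as if
<code>-mod=vendor</code> had been passed explicitly.)
</p>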
<h3 id="godoc">Godoc</h3>
<p>


@ -108,6 +108,26 @@ by Renée at Gophercon in 2016.
He has unique features; he's the <em>Go gopher</em>, not just any old gopher.
</p>
<h3 id="go_or_golang">
Is the language called Go or Golang?</h3>
<p>
The language is called Go.
The "golang" moniker arose because the web site is
<a href="https://golang.org">golang.org</a>, not
go.org, which was not available to us.
Many use the golang name, though, and it is handy as
a label.
For instance, the Twitter tag for the language is "#golang".
The language's name is just plain Go, regardless.
</p>
<p>
A side note: Although the
<a href="https://blog.golang.org/go-brand">official logo</a>
has two capital letters, the language name is written Go, not GO.
</p>
<h3 id="creating_a_new_language">
Why did you create a new language?</h3>
@ -2438,7 +2458,7 @@ in networked servers.
Work continues to refine the algorithm, reduce overhead and
latency further, and to explore new approaches.
The 2018
<a href="https://talks.golang.org/2018/ismmkeynote">ISMM keynote</a>
<a href="https://blog.golang.org/ismmkeynote">ISMM keynote</a>
by Rick Hudson of the Go team
describes the progress so far and suggests some future approaches.
</p>


@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of August 30, 2018",
"Subtitle": "Version of September 24, 2018",
"Path": "/ref/spec"
}-->
@ -5546,7 +5546,10 @@ executes, the function value and parameters to the call are
and saved anew but the actual function is not invoked.
Instead, deferred functions are invoked immediately before
the surrounding function returns, in the reverse order
they were deferred.
they were deferred. That is, if the surrounding function
returns through an explicit <a href="#Return_statements">return statement</a>,
deferred functions are executed <i>after</i> any result parameters are set
by that return statement but <i>before</i> the function returns to its caller.
If a deferred function value evaluates
to <code>nil</code>, execution <a href="#Handling_panics">panics</a>
when the function is invoked, not when the "defer" statement is executed.
@ -5572,12 +5575,13 @@ for i := 0; i &lt;= 3; i++ {
defer fmt.Print(i)
}
// f returns 1
// f returns 42
func f() (result int) {
defer func() {
result++
// result is accessed after it was set to 6 by the return statement
result *= 7
}()
return 0
return 6
}
</pre>


@ -177,6 +177,8 @@ go src=..
strconv
testdata
testdata
text
template
testdata


@ -12,6 +12,11 @@ license that can be found in the LICENSE file.
</head>
<body>
<!--
Add the following polyfill for Microsoft Edge 17/18 support:
<script src="https://cdn.jsdelivr.net/npm/text-encoding@0.7.0/lib/encoding.min.js"></script>
(see https://caniuse.com/#feat=textencoder)
-->
<script src="wasm_exec.js"></script>
<script>
if (!WebAssembly.instantiateStreaming) { // polyfill


@ -152,6 +152,9 @@ func delete(m map[Type]Type1, key Type)
// String: the number of bytes in v.
// Channel: the number of elements queued (unread) in the channel buffer;
// if v is nil, len(v) is zero.
// For some arguments, such as a string literal or a simple array expression, the
// result can be a constant. See the Go language specification's "Length and
// capacity" section for details.
func len(v Type) int
// The cap built-in function returns the capacity of v, according to its type:
@ -161,6 +164,9 @@ func len(v Type) int
// if v is nil, cap(v) is zero.
// Channel: the channel buffer capacity, in units of elements;
// if v is nil, cap(v) is zero.
// For some arguments, such as a simple array expression, the result can be a
// constant. See the Go language specification's "Length and capacity" section for
// details.
func cap(v Type) int
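// An illustrative example (not part of this CL): because these results can
// be constants, they are usable in constant declarations and array sizes.
//
//	const greeting = "hello"
//	const n = len(greeting)     // untyped constant 5
//	var a [8]int
//	const c = cap(a)            // constant 8; array cap is constant too
//	var buf [len(greeting)]byte // array length from a constant len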
// The make built-in function allocates and initializes an object of type


@ -12,13 +12,15 @@ import (
"unicode/utf8"
)
// smallBufferSize is the minimal capacity of the initial allocation.
const smallBufferSize = 64
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
bootstrap [64]byte // memory to hold first slice; helps small buffers avoid allocation.
lastRead readOp // last read operation, so that Unread* can work correctly.
buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
lastRead readOp // last read operation, so that Unread* can work correctly.
// FIXME: it would be advisable to align Buffer to cachelines to avoid false
// sharing.
@ -125,9 +127,8 @@ func (b *Buffer) grow(n int) int {
if i, ok := b.tryGrowByReslice(n); ok {
return i
}
// Check if we can make use of bootstrap array.
if b.buf == nil && n <= len(b.bootstrap) {
b.buf = b.bootstrap[:n]
if b.buf == nil && n <= smallBufferSize {
b.buf = make([]byte, n, smallBufferSize)
return 0
}
c := cap(b.buf)
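// A minimal sketch of the effect (assuming a toolchain with this CL): the
// zero Buffer no longer embeds a 64-byte bootstrap array, so the first
// small write heap-allocates a slice with capacity smallBufferSize instead.
//
//	var b bytes.Buffer                          // zero value, no backing array
//	b.WriteByte('x')                            // grow does make([]byte, 1, 64)
//	fmt.Println(len(b.Bytes()), cap(b.Bytes())) // prints: 1 64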


@ -774,6 +774,15 @@ func Replace(s, old, new []byte, n int) []byte {
return t[0:w]
}
// ReplaceAll returns a copy of the slice s with all
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the slice
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune slice.
func ReplaceAll(s, old, new []byte) []byte {
return Replace(s, old, new, -1)
}
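// An illustrative use of the new function (not part of this CL):
//
//	bytes.ReplaceAll([]byte("oink oink oink"), []byte("oink"), []byte("moo"))
//	// returns []byte("moo moo moo"), exactly like Replace with n = -1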
// EqualFold reports whether s and t, interpreted as UTF-8 strings,
// are equal under Unicode case-folding.
func EqualFold(s, t []byte) bool {
@ -849,21 +858,22 @@ func Index(s, sep []byte) int {
if len(s) <= bytealg.MaxBruteForce {
return bytealg.Index(s, sep)
}
c := sep[0]
c0 := sep[0]
c1 := sep[1]
i := 0
t := s[:len(s)-n+1]
t := len(s) - n + 1
fails := 0
for i < len(t) {
if t[i] != c {
for i < t {
if s[i] != c0 {
// IndexByte is faster than bytealg.Index, so use it as long as
// we're not getting lots of false positives.
o := IndexByte(t[i:], c)
o := IndexByte(s[i:t], c0)
if o < 0 {
return -1
}
i += o
}
if Equal(s[i:i+n], sep) {
if s[i+1] == c1 && Equal(s[i:i+n], sep) {
return i
}
fails++
@ -879,24 +889,25 @@ func Index(s, sep []byte) int {
}
return -1
}
c := sep[0]
c0 := sep[0]
c1 := sep[1]
i := 0
fails := 0
t := s[:len(s)-n+1]
for i < len(t) {
if t[i] != c {
o := IndexByte(t[i:], c)
t := len(s) - n + 1
for i < t {
if s[i] != c0 {
o := IndexByte(s[i:t], c0)
if o < 0 {
break
}
i += o
}
if Equal(s[i:i+n], sep) {
if s[i+1] == c1 && Equal(s[i:i+n], sep) {
return i
}
i++
fails++
if fails >= 4+i>>4 && i < len(t) {
if fails >= 4+i>>4 && i < t {
// Give up on IndexByte, it isn't skipping ahead
// far enough to be better than Rabin-Karp.
// Experiments (using IndexPeriodic) suggest


@ -1362,6 +1362,12 @@ func TestReplace(t *testing.T) {
if cap(in) == cap(out) && &in[:1][0] == &out[:1][0] {
t.Errorf("Replace(%q, %q, %q, %d) didn't copy", tt.in, tt.old, tt.new, tt.n)
}
if tt.n == -1 {
out := ReplaceAll(in, []byte(tt.old), []byte(tt.new))
if s := string(out); s != tt.out {
t.Errorf("ReplaceAll(%q, %q, %q) = %q, want %q", tt.in, tt.old, tt.new, s, tt.out)
}
}
}
}


@ -70,7 +70,7 @@ label:
// LTYPEM spec6 { outcode(int($1), &$2); }
MOVL AX, BX
MOVL $4, BX
// LTYPEI spec7 { outcode(int($1), &$2); }
IMULL AX
IMULL $4, CX


@ -654,6 +654,7 @@ again:
CALL foo(SB)
// LDP/STP
LDP (R0), (R0, R1) // 000440a9
LDP (R0), (R1, R2) // 010840a9
LDP 8(R0), (R1, R2) // 018840a9
LDP -8(R0), (R1, R2) // 01887fa9


@ -188,8 +188,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
MOVBU (R18)(R14<<0), R23 // 577a6e38
MOVBU (R2)(R8.SXTX), R19 // 53e86838
MOVBU (R27)(R23), R14 // MOVBU (R27)(R23*1), R14 // 6e6b7738
MOVHU.P 107(R13), R13 // adb54678
MOVHU.W 192(R2), R2 // 420c4c78
MOVHU.P 107(R14), R13 // cdb54678
MOVHU.W 192(R3), R2 // 620c4c78
MOVHU 6844(R4), R18 // 92787579
MOVHU (R5)(R25.SXTW), R15 // afc87978
//TODO MOVBW.P 77(R18), R11 // 4bd6c438


@ -8,7 +8,19 @@ TEXT errors(SB),$0
ADDSW R7->32, R14, R13 // ERROR "shift amount out of range 0 to 31"
ADD R1.UXTB<<5, R2, R3 // ERROR "shift amount out of range 0 to 4"
ADDS R1.UXTX<<7, R2, R3 // ERROR "shift amount out of range 0 to 4"
AND $0x22220000, R2, RSP // ERROR "illegal combination"
ANDS $0x22220000, R2, RSP // ERROR "illegal combination"
ADD R1, R2, R3, R4 // ERROR "illegal combination"
BICW R7@>33, R5, R16 // ERROR "shift amount out of range 0 to 31"
CINC CS, R2, R3, R4 // ERROR "illegal combination"
CSEL LT, R1, R2 // ERROR "illegal combination"
LDP.P 8(R2), (R2, R3) // ERROR "constrained unpredictable behavior"
LDP.W 8(R3), (R2, R3) // ERROR "constrained unpredictable behavior"
LDP (R1), (R2, R2) // ERROR "constrained unpredictable behavior"
LDP (R0), (F0, F1) // ERROR "invalid register pair"
LDP (R0), (R3, ZR) // ERROR "invalid register pair"
LDXPW (RSP), (R2, R2) // ERROR "constrained unpredictable behavior"
LDAXPW (R5), (R2, R2) // ERROR "constrained unpredictable behavior"
MOVD.P 300(R2), R3 // ERROR "offset out of range [-255,254]"
MOVD.P R3, 344(R2) // ERROR "offset out of range [-255,254]"
MOVD (R3)(R7.SXTX<<2), R8 // ERROR "invalid index shift amount"
@ -16,6 +28,17 @@ TEXT errors(SB),$0
MOVWU (R5)(R4<<1), R10 // ERROR "invalid index shift amount"
MOVB (R5)(R4.SXTW<<5), R10 // ERROR "invalid index shift amount"
MOVH R5, (R6)(R2<<3) // ERROR "invalid index shift amount"
MADD R1, R2, R3 // ERROR "illegal combination"
MOVD.P R1, 8(R1) // ERROR "constrained unpredictable behavior"
MOVD.W 16(R2), R2 // ERROR "constrained unpredictable behavior"
STP (F2, F3), (R0) // ERROR "invalid register pair"
STP.W (R1, R2), 8(R1) // ERROR "constrained unpredictable behavior"
STP.P (R1, R2), 8(R2) // ERROR "constrained unpredictable behavior"
STLXP (R6, R11), (RSP), R6 // ERROR "constrained unpredictable behavior"
STXP (R6, R11), (R2), R2 // ERROR "constrained unpredictable behavior"
STLXR R3, (RSP), R3 // ERROR "constrained unpredictable behavior"
STXR R3, (R4), R4 // ERROR "constrained unpredictable behavior"
STLXRB R2, (R5), R5 // ERROR "constrained unpredictable behavior"
VLD1 (R8)(R13), [V2.B16] // ERROR "illegal combination"
VLD1 8(R9), [V2.B16] // ERROR "illegal combination"
VST1 [V1.B16], (R8)(R13) // ERROR "illegal combination"
@ -83,15 +106,10 @@ TEXT errors(SB),$0
VST1.P [V1.B16], (R8)(R9<<1) // ERROR "invalid extended register"
VREV64 V1.H4, V2.H8 // ERROR "invalid arrangement"
VREV64 V1.D1, V2.D1 // ERROR "invalid arrangement"
ADD R1, R2, R3, R4 // ERROR "illegal combination"
MADD R1, R2, R3 // ERROR "illegal combination"
CINC CS, R2, R3, R4 // ERROR "illegal combination"
CSEL LT, R1, R2 // ERROR "illegal combination"
AND $0x22220000, R2, RSP // ERROR "illegal combination"
ANDS $0x22220000, R2, RSP // ERROR "illegal combination"
LDP (R0), (F0, F1) // ERROR "invalid register pair"
LDP (R0), (R3, ZR) // ERROR "invalid register pair"
STP (F2, F3), (R0) // ERROR "invalid register pair"
FLDPD (R0), (R1, R2) // ERROR "invalid register pair"
FLDPD (R1), (F2, F2) // ERROR "constrained unpredictable behavior"
FLDPS (R2), (F3, F3) // ERROR "constrained unpredictable behavior"
FSTPD (R1, R2), (R0) // ERROR "invalid register pair"
FMOVS (F2), F0 // ERROR "illegal combination"
FMOVD F0, (F1) // ERROR "illegal combination"
RET


@ -550,7 +550,7 @@ label1:
// ftsqrt BF, FRB
FTSQRT F2,$7
// FCFID
// FCFID
// FCFIDS
FCFID F2,F3


@ -700,6 +700,7 @@ var knownFormats = map[string]string{
"int8 %x": "",
"interface{} %#v": "",
"interface{} %T": "",
"interface{} %p": "",
"interface{} %q": "",
"interface{} %s": "",
"interface{} %v": "",


@ -583,7 +583,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst:
case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst,
ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
ssa.OpAMD64BTSLconst, ssa.OpAMD64BTSQconst,
ssa.OpAMD64BTCLconst, ssa.OpAMD64BTCQconst,
ssa.OpAMD64BTRLconst, ssa.OpAMD64BTRQconst:
op := v.Op
if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
// Emit 32-bit version because it's shorter
@ -594,15 +598,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
ssa.OpAMD64BTSLconst, ssa.OpAMD64BTSQconst,
ssa.OpAMD64BTCLconst, ssa.OpAMD64BTCQconst,
ssa.OpAMD64BTRLconst, ssa.OpAMD64BTRQconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
case ssa.OpAMD64CMPQload, ssa.OpAMD64CMPLload, ssa.OpAMD64CMPWload, ssa.OpAMD64CMPBload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
@ -700,6 +695,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
ssa.OpAMD64BTCQmodify, ssa.OpAMD64BTCLmodify, ssa.OpAMD64BTRQmodify, ssa.OpAMD64BTRLmodify, ssa.OpAMD64BTSQmodify, ssa.OpAMD64BTSLmodify,
ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify,
ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify:
p := s.Prog(v.Op.Asm())
@ -764,16 +760,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
} else {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
break
}
fallthrough
case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
ssa.OpAMD64BTCQconstmodify, ssa.OpAMD64BTCLconstmodify, ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTSLconstmodify,
ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTRLconstmodify, ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
val := sc.Val()


@ -7,6 +7,7 @@ package arm
import (
"fmt"
"math"
"math/bits"
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
@ -119,6 +120,28 @@ func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *
return p
}
// find a (lsb, width) pair for BFC
// lsb must be in [0, 31], width must be in [1, 32 - lsb]
// return (0xffffffff, 0) if v is not a binary like 0...01...10...0
func getBFC(v uint32) (uint32, uint32) {
var m, l uint32
// BFC is not applicable with zero
if v == 0 {
return 0xffffffff, 0
}
// find the lowest set bit, for example l=2 for 0x3ffffffc
l = uint32(bits.TrailingZeros32(v))
// m-1 represents the highest set bit index, for example m=30 for 0x3ffffffc
m = 32 - uint32(bits.LeadingZeros32(v))
// check if v is a binary like 0...01...10...0
if (1<<m)-(1<<l) == v {
// it must be m > l for non-zero v
return l, m - l
}
// invalid
return 0xffffffff, 0
}
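// Illustrative values (not part of this CL):
//
//	getBFC(0x000ffff0) == (4, 16)         // bits 4..19: one run; width in (8, 24), so BFC applies
//	getBFC(0x3ffffffc) == (2, 28)         // one run, but width 28 falls outside (8, 24)
//	getBFC(0x00ff00ff) == (0xffffffff, 0) // two runs: not expressible by BFC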
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARMMOVWreg:
@ -267,16 +290,38 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMANDconst, ssa.OpARMBICconst:
// try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
// BFC is only available on ARMv7, and its result and source are in the same register
if objabi.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
var val uint32
if v.Op == ssa.OpARMANDconst {
val = ^uint32(v.AuxInt)
} else { // BICconst
val = uint32(v.AuxInt)
}
lsb, width := getBFC(val)
// omit BFC for ARM's imm12
if 8 < width && width < 24 {
p := s.Prog(arm.ABFC)
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(width)
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(lsb)})
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
break
}
}
// fall back to ordinary form
fallthrough
case ssa.OpARMADDconst,
ssa.OpARMADCconst,
ssa.OpARMSUBconst,
ssa.OpARMSBCconst,
ssa.OpARMRSBconst,
ssa.OpARMRSCconst,
ssa.OpARMANDconst,
ssa.OpARMORconst,
ssa.OpARMXORconst,
ssa.OpARMBICconst,
ssa.OpARMSLLconst,
ssa.OpARMSRLconst,
ssa.OpARMSRAconst:


@ -195,7 +195,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64FNMULS,
ssa.OpARM64FNMULD,
ssa.OpARM64FDIVS,
ssa.OpARM64FDIVD:
ssa.OpARM64FDIVD,
ssa.OpARM64ROR,
ssa.OpARM64RORW:
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
@ -253,6 +255,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64MVNshiftLL, ssa.OpARM64NEGshiftLL:
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
case ssa.OpARM64MVNshiftRL, ssa.OpARM64NEGshiftRL:
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64MVNshiftRA, ssa.OpARM64NEGshiftRA:
genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64ADDshiftLL,
ssa.OpARM64SUBshiftLL,
ssa.OpARM64ANDshiftLL,
@ -315,11 +323,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
case ssa.OpARM64CMPshiftLL:
case ssa.OpARM64CMPshiftLL, ssa.OpARM64CMNshiftLL, ssa.OpARM64TSTshiftLL:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
case ssa.OpARM64CMPshiftRL:
case ssa.OpARM64CMPshiftRL, ssa.OpARM64CMNshiftRL, ssa.OpARM64TSTshiftRL:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64CMPshiftRA:
case ssa.OpARM64CMPshiftRA, ssa.OpARM64CMNshiftRA, ssa.OpARM64TSTshiftRA:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64MOVDaddr:
p := s.Prog(arm64.AMOVD)
@ -696,8 +704,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
fallthrough
case ssa.OpARM64MVN,
ssa.OpARM64NEG,
ssa.OpARM64FABSD,
ssa.OpARM64FMOVDfpgp,
ssa.OpARM64FMOVDgpfp,
ssa.OpARM64FMOVSfpgp,
ssa.OpARM64FMOVSgpfp,
ssa.OpARM64FNEGS,
ssa.OpARM64FNEGD,
ssa.OpARM64FSQRTD,
@ -728,6 +739,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64CLZW,
ssa.OpARM64FRINTAD,
ssa.OpARM64FRINTMD,
ssa.OpARM64FRINTND,
ssa.OpARM64FRINTPD,
ssa.OpARM64FRINTZD:
p := s.Prog(v.Op.Asm())

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -234,7 +234,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node {
if n.Op == OLITERAL && !reuse {
// Can't always set n.Type directly on OLITERAL nodes.
// See discussion on CL 20813.
n = n.copy()
n = n.rawcopy()
reuse = true
}
@ -476,7 +476,7 @@ func toflt(v Val) Val {
f := newMpflt()
f.Set(&u.Real)
if u.Imag.CmpFloat64(0) != 0 {
yyerror("constant %v%vi truncated to real", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp|FmtSign))
yyerror("constant %v truncated to real", u.GoString())
}
v.U = f
}
@ -509,11 +509,11 @@ func toint(v Val) Val {
// value from the error message.
// (See issue #11371).
var t big.Float
t.Parse(fconv(u, FmtSharp), 10)
t.Parse(u.GoString(), 10)
if t.IsInt() {
yyerror("constant truncated to integer")
} else {
yyerror("constant %v truncated to integer", fconv(u, FmtSharp))
yyerror("constant %v truncated to integer", u.GoString())
}
}
}
@ -522,7 +522,7 @@ func toint(v Val) Val {
case *Mpcplx:
i := new(Mpint)
if !i.SetFloat(&u.Real) || u.Imag.CmpFloat64(0) != 0 {
yyerror("constant %v%vi truncated to integer", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp|FmtSign))
yyerror("constant %v truncated to integer", u.GoString())
}
v.U = i
@ -1200,8 +1200,7 @@ func setconst(n *Node, v Val) {
// Ensure n.Orig still points to a semantically-equivalent
// expression after we rewrite n into a constant.
if n.Orig == n {
n.Orig = n.copy()
n.Orig.Orig = n.Orig
n.Orig = n.sepcopy()
}
*n = Node{
@ -1331,7 +1330,7 @@ func defaultlitreuse(n *Node, t *types.Type, reuse canReuseNode) *Node {
}
if n.Op == OLITERAL && !reuse {
n = n.copy()
n = n.rawcopy()
reuse = true
}


@ -0,0 +1,287 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements textual dumping of arbitrary data structures
// for debugging purposes. The code is customized for Node graphs
// and may be used for an alternative view of the node structure.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"io"
"os"
"reflect"
"regexp"
"unicode"
"unicode/utf8"
)
// dump is like fdump but prints to stderr.
func dump(root interface{}, filter string, depth int) {
fdump(os.Stderr, root, filter, depth)
}
// fdump prints the structure of a rooted data structure
// to w by depth-first traversal of the data structure.
//
// The filter parameter is a regular expression. If it is
// non-empty, only struct fields whose names match filter
// are printed.
//
// The depth parameter controls how deep traversal recurses
// before it returns (higher value means greater depth).
// If an empty field filter is given, a good depth default value
// is 4. A negative depth means no depth limit, which may be fine
// for small data structures or if there is a non-empty filter.
//
// In the output, Node structs are identified by their Op name
// rather than their type; struct fields with zero values or
// non-matching field names are omitted, and "…" means recursion
// depth has been reached or struct fields have been omitted.
func fdump(w io.Writer, root interface{}, filter string, depth int) {
if root == nil {
fmt.Fprintln(w, "nil")
return
}
if filter == "" {
filter = ".*" // default
}
p := dumper{
output: w,
fieldrx: regexp.MustCompile(filter),
ptrmap: make(map[uintptr]int),
last: '\n', // force printing of line number on first line
}
p.dump(reflect.ValueOf(root), depth)
p.printf("\n")
}
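// Illustrative calls (not part of this CL):
//
//	dump(n, "", 4)                         // Node graph of n to stderr, depth 4
//	fdump(os.Stdout, fn, "Left|Right", -1) // only matching fields, no depth limit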
type dumper struct {
output io.Writer
fieldrx *regexp.Regexp // field name filter
ptrmap map[uintptr]int // ptr -> dump line number
lastadr string // last address string printed (for shortening)
// output
indent int // current indentation level
last byte // last byte processed by Write
line int // current line number
}
var indentBytes = []byte(". ")
func (p *dumper) Write(data []byte) (n int, err error) {
var m int
for i, b := range data {
// invariant: data[0:n] has been written
if b == '\n' {
m, err = p.output.Write(data[n : i+1])
n += m
if err != nil {
return
}
} else if p.last == '\n' {
p.line++
_, err = fmt.Fprintf(p.output, "%6d ", p.line)
if err != nil {
return
}
for j := p.indent; j > 0; j-- {
_, err = p.output.Write(indentBytes)
if err != nil {
return
}
}
}
p.last = b
}
if len(data) > n {
m, err = p.output.Write(data[n:])
n += m
}
return
}
// printf is a convenience wrapper.
func (p *dumper) printf(format string, args ...interface{}) {
if _, err := fmt.Fprintf(p, format, args...); err != nil {
panic(err)
}
}
// addr returns the (hexadecimal) address string of the object
// represented by x (or "?" if x is not addressable), with the
// common prefix between this and the prior address replaced by
// "0x…" to make it easier to visually match addresses.
func (p *dumper) addr(x reflect.Value) string {
if !x.CanAddr() {
return "?"
}
adr := fmt.Sprintf("%p", x.Addr().Interface())
s := adr
if i := commonPrefixLen(p.lastadr, adr); i > 0 {
s = "0x…" + adr[i:]
}
p.lastadr = adr
return s
}
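// For example (illustrative): with p.lastadr == "0xc000123400" and
// adr == "0xc000123460", the common prefix is 10 bytes long, so addr
// returns "0x…60".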
// dump prints the contents of x.
func (p *dumper) dump(x reflect.Value, depth int) {
if depth == 0 {
p.printf("…")
return
}
// special cases
switch v := x.Interface().(type) {
case Nodes:
// unpack Nodes since reflect cannot look inside
// due to the unexported field in its struct
x = reflect.ValueOf(v.Slice())
case src.XPos:
p.printf("%s", linestr(v))
return
case *types.Node:
x = reflect.ValueOf(asNode(v))
}
switch x.Kind() {
case reflect.String:
p.printf("%q", x.Interface()) // print strings in quotes
case reflect.Interface:
if x.IsNil() {
p.printf("nil")
return
}
p.dump(x.Elem(), depth-1)
case reflect.Ptr:
if x.IsNil() {
p.printf("nil")
return
}
p.printf("*")
ptr := x.Pointer()
if line, exists := p.ptrmap[ptr]; exists {
p.printf("(@%d)", line)
return
}
p.ptrmap[ptr] = p.line
p.dump(x.Elem(), depth) // don't count pointer indirection towards depth
case reflect.Slice:
if x.IsNil() {
p.printf("nil")
return
}
p.printf("%s (%d entries) {", x.Type(), x.Len())
if x.Len() > 0 {
p.indent++
p.printf("\n")
for i, n := 0, x.Len(); i < n; i++ {
p.printf("%d: ", i)
p.dump(x.Index(i), depth-1)
p.printf("\n")
}
p.indent--
}
p.printf("}")
case reflect.Struct:
typ := x.Type()
isNode := false
if n, ok := x.Interface().(Node); ok {
isNode = true
p.printf("%s %s {", n.Op.String(), p.addr(x))
} else {
p.printf("%s {", typ)
}
p.indent++
first := true
omitted := false
for i, n := 0, typ.NumField(); i < n; i++ {
// Exclude non-exported fields because their
// values cannot be accessed via reflection.
if name := typ.Field(i).Name; isExported(name) {
if !p.fieldrx.MatchString(name) {
omitted = true
continue // field name not selected by filter
}
// special cases
if isNode && name == "Op" {
omitted = true
continue // Op field already printed for Nodes
}
x := x.Field(i)
if isZeroVal(x) {
omitted = true
continue // exclude zero-valued fields
}
if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
omitted = true
continue // exclude empty Nodes slices
}
if first {
p.printf("\n")
first = false
}
p.printf("%s: ", name)
p.dump(x, depth-1)
p.printf("\n")
}
}
if omitted {
p.printf("…\n")
}
p.indent--
p.printf("}")
default:
p.printf("%v", x.Interface())
}
}
func isZeroVal(x reflect.Value) bool {
switch x.Kind() {
case reflect.Bool:
return !x.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return x.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return x.Uint() == 0
case reflect.String:
return x.String() == ""
case reflect.Interface, reflect.Ptr, reflect.Slice:
return x.IsNil()
}
return false
}
func isExported(name string) bool {
ch, _ := utf8.DecodeRuneInString(name)
return unicode.IsUpper(ch)
}
func commonPrefixLen(a, b string) (i int) {
for i < len(a) && i < len(b) && a[i] == b[i] {
i++
}
return
}


@ -654,9 +654,71 @@ func (e *EscState) esclist(l Nodes, parent *Node) {
}
}
func (e *EscState) isSliceSelfAssign(dst, src *Node) bool {
// Detect the following special case.
//
// func (b *Buffer) Foo() {
// n, m := ...
// b.buf = b.buf[n:m]
// }
//
// This assignment is a no-op for escape analysis,
// it does not store any new pointers into b that were not already there.
// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
// Here we assume that the statement will not contain calls,
// that is, that order will move any calls to init.
// Otherwise base ONAME value could change between the moments
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
if dst.Op != OIND && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
return false
}
// src is a slice operation.
switch src.Op {
case OSLICE, OSLICE3, OSLICESTR:
// OK.
case OSLICEARR, OSLICE3ARR:
// Since arrays are embedded into containing object,
// slice of non-pointer array will introduce a new pointer into b that was not already there
// (pointer to b itself). After such assignment, if b contents escape,
// b escapes as well. If we ignore such OSLICEARR, we will conclude
// that b does not escape when b contents do.
//
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
if src.Left.Op == OADDR {
return false
}
default:
return false
}
// slice is applied to ONAME dereference.
if src.Left.Op != OIND && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
return false
}
// dst and src reference the same base ONAME.
return dst.Left == src.Left.Left
}
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func (e *EscState) isSelfAssign(dst, src *Node) bool {
if e.isSliceSelfAssign(dst, src) {
return true
}
// Detect trivial assignments that assign back to the same object.
//
// It covers these cases:
// val.x = val.y
// val.x[i] = val.y[j]
// val.x1.x2 = val.x1.y2
// ... etc
//
// These assignments do not change assigned object lifetime.
if dst == nil || src == nil || dst.Op != src.Op {
return false
}
@ -689,18 +751,16 @@ func (e *EscState) mayAffectMemory(n *Node) bool {
switch n.Op {
case ONAME, OCLOSUREVAR, OLITERAL:
return false
case ODOT, ODOTPTR:
return e.mayAffectMemory(n.Left)
case OIND, OCONVNOP:
return e.mayAffectMemory(n.Left)
case OCONV:
return e.mayAffectMemory(n.Left)
case OINDEX:
// Left+Right group.
case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
return e.mayAffectMemory(n.Left) || e.mayAffectMemory(n.Right)
case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
return e.mayAffectMemory(n.Left) || e.mayAffectMemory(n.Right)
case ONOT, OCOM, OPLUS, OMINUS, OALIGNOF, OOFFSETOF, OSIZEOF:
// Left group.
case ODOT, ODOTPTR, OIND, OCONVNOP, OCONV, OLEN, OCAP,
ONOT, OCOM, OPLUS, OMINUS, OALIGNOF, OOFFSETOF, OSIZEOF:
return e.mayAffectMemory(n.Left)
default:
return true
}
@ -832,48 +892,8 @@ opSwitch:
}
}
// Filter out the following special case.
//
// func (b *Buffer) Foo() {
// n, m := ...
// b.buf = b.buf[n:m]
// }
//
// This assignment is a no-op for escape analysis,
// it does not store any new pointers into b that were not already there.
// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
case OAS, OASOP:
if (n.Left.Op == OIND || n.Left.Op == ODOTPTR) && n.Left.Left.Op == ONAME && // dst is ONAME dereference
(n.Right.Op == OSLICE || n.Right.Op == OSLICE3 || n.Right.Op == OSLICESTR) && // src is slice operation
(n.Right.Left.Op == OIND || n.Right.Left.Op == ODOTPTR) && n.Right.Left.Left.Op == ONAME && // slice is applied to ONAME dereference
n.Left.Left == n.Right.Left.Left { // dst and src reference the same base ONAME
// Here we also assume that the statement will not contain calls,
// that is, that order will move any calls to init.
// Otherwise base ONAME value could change between the moments
// when we evaluate it for dst and for src.
//
// Note, this optimization does not apply to OSLICEARR,
// because it does introduce a new pointer into b that was not already there
// (pointer to b itself). After such assignment, if b contents escape,
// b escapes as well. If we ignore such OSLICEARR, we will conclude
// that b does not escape when b contents do.
if Debug['m'] != 0 {
Warnl(n.Pos, "%v ignoring self-assignment to %S", e.curfnSym(n), n.Left)
}
break
}
// Also skip trivial assignments that assign back to the same object.
//
// It covers these cases:
// val.x = val.y
// val.x[i] = val.y[j]
// val.x1.x2 = val.x1.y2
// ... etc
//
// These assignments do not change assigned object lifetime.
// Filter out some no-op assignments for escape analysis.
if e.isSelfAssign(n.Left, n.Right) {
if Debug['m'] != 0 {
Warnl(n.Pos, "%v ignoring self-assignment in %S", e.curfnSym(n), n)
@ -1396,7 +1416,7 @@ func describeEscape(em uint16) string {
}
s += "contentToHeap"
}
for em >>= EscReturnBits; em != 0; em = em >> bitsPerOutputInTag {
for em >>= EscReturnBits; em != 0; em >>= bitsPerOutputInTag {
// See encoding description above
if s != "" {
s += " "
@ -1446,7 +1466,7 @@ func (e *EscState) escassignfromtag(note string, dsts Nodes, src, call *Node) ui
em0 := em
dstsi := 0
for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em = em >> bitsPerOutputInTag {
for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em >>= bitsPerOutputInTag {
// Prefer the lowest-level path to the reference (for escape purposes).
// Two-bit encoding (for example, 1, 3, and 4 bits are other options)
// 01 = 0-level


@ -12,8 +12,6 @@ import (
)
var (
flagiexport bool // if set, use indexed export data format
Debug_export int // if set, print debugging information about export data
)
@ -75,11 +73,7 @@ func dumpexport(bout *bio.Writer) {
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
if flagiexport {
iexport(bout.Writer)
} else {
export(bout.Writer, Debug_export != 0)
}
iexport(bout.Writer)
size := bout.Offset() - off
exportf(bout, "\n$$\n")
@ -95,7 +89,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
// declaration for all imported symbols. The exception
// is declarations for Runtimepkg, which are populated
// by loadsys instead.
if flagiexport && s.Pkg != Runtimepkg {
if s.Pkg != Runtimepkg {
Fatalf("missing ONONAME for %v\n", s)
}


@ -6,6 +6,8 @@ package gc
import (
"math"
"os"
"runtime"
"testing"
)
@ -364,11 +366,19 @@ func TestFloatConvertFolded(t *testing.T) {
func TestFloat32StoreToLoadConstantFold(t *testing.T) {
// Test that math.Float32{,from}bits constant fold correctly.
// In particular we need to be careful that signalling NaN (sNaN) values
// In particular we need to be careful that signaling NaN (sNaN) values
// are not converted to quiet NaN (qNaN) values during compilation.
// See issue #27193 for more information.
// signalling NaNs
// TODO: this method for detecting 387 won't work if the compiler has been
// built using GOARCH=386 GO386=387 and either the target is a different
// architecture or the GO386=387 environment variable is not set when the
// test is run.
if runtime.GOARCH == "386" && os.Getenv("GO386") == "387" {
t.Skip("signaling NaNs are not propagated on 387 (issue #27516)")
}
// signaling NaNs
{
const nan = uint32(0x7f800001) // sNaN
if x := math.Float32bits(math.Float32frombits(nan)); x != nan {


@ -119,7 +119,7 @@ const (
// *types.Type:
// %#v Go format
// %#L type definition instead of name
// %#S omit"func" and receiver in function signature
// %#S omit "func" and receiver in function signature
//
// %-v type identifiers
// %-S type identifiers without "func" and arg names in type signatures (methodsym)
@ -514,10 +514,10 @@ func (v Val) vconv(s fmt.State, flag FmtFlag) {
case *Mpint:
if !u.Rune {
if flag&FmtSharp != 0 {
fmt.Fprint(s, bconv(u, FmtSharp))
fmt.Fprint(s, u.String())
return
}
fmt.Fprint(s, bconv(u, 0))
fmt.Fprint(s, u.GoString())
return
}
@ -537,29 +537,19 @@ func (v Val) vconv(s fmt.State, flag FmtFlag) {
case *Mpflt:
if flag&FmtSharp != 0 {
fmt.Fprint(s, fconv(u, 0))
fmt.Fprint(s, u.String())
return
}
fmt.Fprint(s, fconv(u, FmtSharp))
fmt.Fprint(s, u.GoString())
return
case *Mpcplx:
switch {
case flag&FmtSharp != 0:
fmt.Fprintf(s, "(%v+%vi)", &u.Real, &u.Imag)
case v.U.(*Mpcplx).Real.CmpFloat64(0) == 0:
fmt.Fprintf(s, "%vi", fconv(&u.Imag, FmtSharp))
case v.U.(*Mpcplx).Imag.CmpFloat64(0) == 0:
fmt.Fprint(s, fconv(&u.Real, FmtSharp))
case v.U.(*Mpcplx).Imag.CmpFloat64(0) < 0:
fmt.Fprintf(s, "(%v%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))
default:
fmt.Fprintf(s, "(%v+%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))
if flag&FmtSharp != 0 {
fmt.Fprint(s, u.String())
return
}
fmt.Fprint(s, u.GoString())
return
case string:
fmt.Fprint(s, strconv.Quote(u))
@ -671,7 +661,7 @@ func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
return "error"
}
// Unless the 'l' flag was specified, if the type has a name, just print that name.
// Unless the 'L' flag was specified, if the type has a name, just print that name.
if flag&FmtLong == 0 && t.Sym != nil && t != types.Types[t.Etype] {
switch mode {
case FTypeId, FTypeIdName:
@ -1314,16 +1304,14 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)
case OCOMPLIT:
ptrlit := n.Right != nil && n.Right.Implicit() && n.Right.Type != nil && n.Right.Type.IsPtr()
if mode == FErr {
if n.Right != nil && n.Right.Type != nil && !n.Implicit() {
if ptrlit {
if n.Right.Implicit() && n.Right.Type.IsPtr() {
mode.Fprintf(s, "&%v literal", n.Right.Type.Elem())
return
} else {
mode.Fprintf(s, "%v literal", n.Right.Type)
return
}
mode.Fprintf(s, "%v literal", n.Right.Type)
return
}
fmt.Fprint(s, "composite literal")
@ -1532,9 +1520,8 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) {
t := n.Type
// We almost always want the original, except in export mode for literals.
// This saves the importer some work, and avoids us having to redo some
// special casing for package unsafe.
// We almost always want the original.
// TODO(gri) Why the special case for OLITERAL?
if n.Op != OLITERAL && n.Orig != nil {
n = n.Orig
}


@ -71,9 +71,7 @@ func fnpkg(fn *Node) *types.Pkg {
func typecheckinl(fn *Node) {
lno := setlineno(fn)
if flagiexport {
expandInline(fn)
}
expandInline(fn)
// typecheckinl is only for imported functions;
// their bodies may refer to unsafe as long as the package


@ -249,7 +249,6 @@ func Main(archInit func(*Arch)) {
flag.StringVar(&blockprofile, "blockprofile", "", "write block profile to `file`")
flag.StringVar(&mutexprofile, "mutexprofile", "", "write mutex profile to `file`")
flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
flag.BoolVar(&flagiexport, "iexport", true, "export indexed package data")
objabi.Flagparse(usage)
// Record flags that affect the build result. (And don't
@ -1129,24 +1128,13 @@ func importfile(f *Val) *types.Pkg {
errorexit()
}
// New indexed format is distinguished by an 'i' byte,
// whereas old export format always starts with 'c', 'd', or 'v'.
if c == 'i' {
if !flagiexport {
yyerror("import %s: cannot import package compiled with -iexport=true", file)
errorexit()
}
iimport(importpkg, imp)
} else {
if flagiexport {
yyerror("import %s: cannot import package compiled with -iexport=false", file)
errorexit()
}
imp.UnreadByte()
Import(importpkg, imp.Reader)
// Indexed format is distinguished by an 'i' byte,
// whereas previous export formats started with 'c', 'd', or 'v'.
if c != 'i' {
yyerror("import %s: unexpected package format byte: %v", file, c)
errorexit()
}
iimport(importpkg, imp)
default:
yyerror("no import in %q", path_)


@ -201,24 +201,16 @@ func (a *Mpflt) SetString(as string) {
}
func (f *Mpflt) String() string {
return fconv(f, 0)
return f.Val.Text('b', 0)
}
func fconv(fvp *Mpflt, flag FmtFlag) string {
if flag&FmtSharp == 0 {
return fvp.Val.Text('b', 0)
}
// use decimal format for error messages
func (fvp *Mpflt) GoString() string {
// determine sign
sign := ""
f := &fvp.Val
var sign string
if f.Sign() < 0 {
sign = "-"
f = new(big.Float).Abs(f)
} else if flag&FmtSign != 0 {
sign = "+"
}
// Don't try to convert infinities (will not terminate).
@ -334,3 +326,34 @@ func (v *Mpcplx) Div(rv *Mpcplx) bool {
return true
}
func (v *Mpcplx) String() string {
return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
}
func (v *Mpcplx) GoString() string {
var re string
sre := v.Real.CmpFloat64(0)
if sre != 0 {
re = v.Real.GoString()
}
var im string
sim := v.Imag.CmpFloat64(0)
if sim != 0 {
im = v.Imag.GoString()
}
switch {
case sre == 0 && sim == 0:
return "0"
case sre == 0:
return im + "i"
case sim == 0:
return re
case sim < 0:
return fmt.Sprintf("(%s%si)", re, im)
default:
return fmt.Sprintf("(%s+%si)", re, im)
}
}
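// Illustrative results (not part of this CL):
//
//	0+0i -> "0"
//	0+2i -> "2i"
//	3+0i -> "3"
//	1+2i -> "(1+2i)"
//	1-2i -> "(1-2i)" // the minus sign comes from im itself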


@ -299,13 +299,10 @@ func (a *Mpint) SetString(as string) {
}
}
func (a *Mpint) String() string {
return bconv(a, 0)
func (a *Mpint) GoString() string {
return a.Val.String()
}
func bconv(xval *Mpint, flag FmtFlag) string {
if flag&FmtSharp != 0 {
return fmt.Sprintf("%#x", &xval.Val)
}
return xval.Val.String()
func (a *Mpint) String() string {
return fmt.Sprintf("%#x", &a.Val)
}
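// Illustrative results for the value 42 (not part of this CL):
//
//	GoString() == "42"   // decimal, used for error messages
//	String()   == "0x2a" // hexadecimal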


@ -281,7 +281,7 @@ func dumpglobls() {
funcsyms = nil
}
// addGCLocals adds gcargs and gclocals symbols to Ctxt.Data.
// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
// It takes care not to add any duplicates.
// Though the object file format handles duplicates efficiently,
// storing only a single copy of the data,
@ -299,6 +299,9 @@ func addGCLocals() {
Ctxt.Data = append(Ctxt.Data, gcsym)
seen[gcsym.Name] = true
}
if x := s.Func.StackObjects; x != nil {
ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.LOCAL)
}
}
}


@ -4,9 +4,9 @@ package gc
import "strconv"
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDARRAYBYTESTRARRAYBYTESTRTMPARRAYRUNESTRSTRARRAYBYTESTRARRAYBYTETMPSTRARRAYRUNEASAS2AS2FUNCAS2RECVAS2MAPRAS2DOTTYPEASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECMPIFACECMPSTRCOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTINDINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMULDIVMODLSHRSHANDANDNOTNEWNOTCOMPLUSMINUSORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASEXCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELPROCRANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDDDDARGINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARKILLVARLIVEINDREGSPRETJMPGETGEND"
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDARRAYBYTESTRARRAYBYTESTRTMPARRAYRUNESTRSTRARRAYBYTESTRARRAYBYTETMPSTRARRAYRUNEASAS2AS2FUNCAS2RECVAS2MAPRAS2DOTTYPEASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECMPIFACECMPSTRCOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTINDINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMULDIVMODLSHRSHANDANDNOTNEWNOTCOMPLUSMINUSORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASEXCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELPROCRANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDDDDARGINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVEINDREGSPRETJMPGETGEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 73, 88, 100, 112, 127, 139, 141, 144, 151, 158, 165, 175, 179, 183, 191, 199, 208, 216, 219, 224, 231, 239, 245, 252, 258, 267, 275, 283, 289, 293, 302, 309, 313, 316, 323, 331, 339, 346, 352, 355, 361, 368, 376, 380, 387, 395, 397, 399, 401, 403, 405, 407, 410, 415, 423, 426, 435, 438, 442, 450, 457, 466, 469, 472, 475, 478, 481, 484, 490, 493, 496, 499, 503, 508, 512, 517, 522, 528, 533, 537, 542, 550, 558, 564, 573, 580, 584, 591, 598, 606, 610, 614, 618, 625, 632, 640, 646, 651, 656, 660, 665, 673, 678, 683, 687, 690, 698, 702, 704, 709, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 778, 784, 791, 796, 800, 805, 809, 819, 824, 832, 839, 846, 854, 860, 864, 867}
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 73, 88, 100, 112, 127, 139, 141, 144, 151, 158, 165, 175, 179, 183, 191, 199, 208, 216, 219, 224, 231, 239, 245, 252, 258, 267, 275, 283, 289, 293, 302, 309, 313, 316, 323, 331, 339, 346, 352, 355, 361, 368, 376, 380, 387, 395, 397, 399, 401, 403, 405, 407, 410, 415, 423, 426, 435, 438, 442, 450, 457, 466, 469, 472, 475, 478, 481, 484, 490, 493, 496, 499, 503, 508, 512, 517, 522, 528, 533, 537, 542, 550, 558, 564, 573, 580, 584, 591, 598, 606, 610, 614, 618, 625, 632, 640, 646, 651, 656, 660, 665, 673, 678, 683, 687, 690, 698, 702, 704, 709, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 778, 784, 791, 796, 800, 805, 809, 819, 824, 832, 838, 845, 852, 860, 866, 870, 873}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {


@ -109,8 +109,7 @@ func (o *Order) cheapExpr(n *Node) *Node {
if l == n.Left {
return n
}
a := n.copy()
a.Orig = a
a := n.sepcopy()
a.Left = l
return typecheck(a, Erv)
}
@ -135,8 +134,7 @@ func (o *Order) safeExpr(n *Node) *Node {
if l == n.Left {
return n
}
a := n.copy()
a.Orig = a
a := n.sepcopy()
a.Left = l
return typecheck(a, Erv)
@ -145,8 +143,7 @@ func (o *Order) safeExpr(n *Node) *Node {
if l == n.Left {
return n
}
a := n.copy()
a.Orig = a
a := n.sepcopy()
a.Left = l
return typecheck(a, Erv)
@ -161,8 +158,7 @@ func (o *Order) safeExpr(n *Node) *Node {
if l == n.Left && r == n.Right {
return n
}
a := n.copy()
a.Orig = a
a := n.sepcopy()
a.Left = l
a.Right = r
return typecheck(a, Erv)


@ -233,6 +233,26 @@ func compile(fn *Node) {
// Set up the function's LSym early to avoid data races with the assemblers.
fn.Func.initLSym()
// Make sure type syms are declared for all types that might
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
if fn.Func.lsym != nil { // not func _(){}
for _, n := range fn.Func.Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT, PAUTO:
if livenessShouldTrack(n) && n.Addrtaken() {
dtypesym(n.Type)
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.Func.lsym.Func.StackObjects == nil {
fn.Func.lsym.Func.StackObjects = lookup(fmt.Sprintf("%s.stkobj", fn.funcname())).Linksym()
}
}
}
}
}
if compilenow() {
compileSSA(fn, 0)
} else {


@ -78,6 +78,10 @@ import (
// that its argument is certainly dead, for use when the liveness analysis
// would not otherwise be able to deduce that fact.
// TODO: get rid of OpVarKill here. It's useful for stack frame allocation,
// where it lets the compiler allocate two temps to the same location. Here it
// is now useless, since the introduction of stack objects.
// BlockEffects summarizes the liveness effects on an SSA block.
type BlockEffects struct {
// Computed during Liveness.prologue using only the content of
@ -85,23 +89,15 @@ type BlockEffects struct {
//
// uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block)
// avarinit: addrtaken variables set or used (proof of initialization)
uevar varRegVec
varkill varRegVec
avarinit bvec
uevar varRegVec
varkill varRegVec
// Computed during Liveness.solve using control flow information:
//
// livein: variables live at block entry
// liveout: variables live at block exit
// avarinitany: addrtaken variables possibly initialized at block exit
// (initialized in block or at exit from any predecessor block)
// avarinitall: addrtaken variables certainly initialized at block exit
// (initialized in block or at exit from all predecessor blocks)
livein varRegVec
liveout varRegVec
avarinitany bvec
avarinitall bvec
livein varRegVec
liveout varRegVec
}
// A collection of global state used by liveness analysis.
@ -186,10 +182,9 @@ func (idx LivenessIndex) Valid() bool {
}
type progeffectscache struct {
textavarinit []int32
retuevar []int32
tailuevar []int32
initialized bool
retuevar []int32
tailuevar []int32
initialized bool
}
// varRegVec contains liveness bitmaps for variables and registers.
@ -264,24 +259,13 @@ func (lv *Liveness) initcache() {
// all the parameters for correctness, and similarly it must not
// read the out arguments - they won't be set until the new
// function runs.
lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
if node.Addrtaken() {
lv.cache.textavarinit = append(lv.cache.textavarinit, int32(i))
}
case PPARAMOUT:
// If the result had its address taken, it is being tracked
// by the avarinit code, which does not use uevar.
// If we added it to uevar too, we'd not see any kill
// and decide that the variable was live entry, which it is not.
// So only use uevar in the non-addrtaken case.
// The p.to.type == obj.TYPE_NONE limits the bvset to
// non-tail-call return instructions; see note below for details.
if !node.Addrtaken() {
lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
}
// All results are live at every return point.
// Note that this point is after escaping return values
// are copied back to the stack using their PAUTOHEAP references.
lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
}
}
}
@ -291,21 +275,13 @@ func (lv *Liveness) initcache() {
//
// The possible flags are:
// uevar - used by the instruction
// varkill - killed by the instruction
// for variables without address taken, means variable was set
// for variables with address taken, means variable was marked dead
// avarinit - initialized or referred to by the instruction,
// only for variables with address taken but not escaping to heap
//
// The avarinit output serves as a signal that the data has been
// initialized, because any use of a variable must come after its
// initialization.
// varkill - killed by the instruction (set)
// A kill happens after the use (for an instruction that updates a value, for example).
type liveEffect int
const (
uevar liveEffect = 1 << iota
varkill
avarinit
)
// valueEffects returns the index of a variable in lv.vars and the
@ -329,27 +305,15 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
}
var effect liveEffect
if n.Addrtaken() {
if v.Op != ssa.OpVarKill {
effect |= avarinit
}
if v.Op == ssa.OpVarDef || v.Op == ssa.OpVarKill {
effect |= varkill
}
} else {
// Read is a read, obviously.
// Addr by itself is also implicitly a read.
//
// Addr|Write means that the address is being taken
// but only so that the instruction can write to the value.
// It is not a read.
if e&ssa.SymRead != 0 || e&(ssa.SymAddr|ssa.SymWrite) == ssa.SymAddr {
effect |= uevar
}
if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
effect |= varkill
}
// Read is a read, obviously.
//
// Addr is a read also, as any subsequent holder of the pointer must be able
// to see all the values (including initialization) written so far.
if e&(ssa.SymRead|ssa.SymAddr) != 0 {
effect |= uevar
}
if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
effect |= varkill
}
if effect == 0 {
@ -545,9 +509,6 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
be.varkill = varRegVec{vars: bulk.next()}
be.livein = varRegVec{vars: bulk.next()}
be.liveout = varRegVec{vars: bulk.next()}
be.avarinit = bulk.next()
be.avarinitany = bulk.next()
be.avarinitall = bulk.next()
}
lv.livenessMap.reset(lv.f.NumValues())
@ -869,19 +830,6 @@ func (lv *Liveness) prologue() {
}
be.uevar.regs |= regUevar
}
// Walk the block instructions forward to update avarinit bits.
// avarinit describes the effect at the end of the block, not the beginning.
for _, val := range b.Values {
pos, e := lv.valueEffects(val)
// No need for regEffects because registers never appear in avarinit.
if e&varkill != 0 {
be.avarinit.Unset(pos)
}
if e&avarinit != 0 {
be.avarinit.Set(pos)
}
}
}
}
@ -892,51 +840,10 @@ func (lv *Liveness) solve() {
nvars := int32(len(lv.vars))
newlivein := varRegVec{vars: bvalloc(nvars)}
newliveout := varRegVec{vars: bvalloc(nvars)}
any := bvalloc(nvars)
all := bvalloc(nvars)
// Push avarinitall, avarinitany forward.
// avarinitall says the addressed var is initialized along all paths reaching the block exit.
// avarinitany says the addressed var is initialized along some path reaching the block exit.
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
if b == lv.f.Entry {
be.avarinitall.Copy(be.avarinit)
} else {
be.avarinitall.Clear()
be.avarinitall.Not()
}
be.avarinitany.Copy(be.avarinit)
}
// Walk blocks in the general direction of propagation (RPO
// for avarinit{any,all}, and PO for live{in,out}). This
// improves convergence.
// Walk blocks in postorder. This improves convergence.
po := lv.f.Postorder()
for change := true; change; {
change = false
for i := len(po) - 1; i >= 0; i-- {
b := po[i]
be := lv.blockEffects(b)
lv.avarinitanyall(b, any, all)
any.AndNot(any, be.varkill.vars)
all.AndNot(all, be.varkill.vars)
any.Or(any, be.avarinit)
all.Or(all, be.avarinit)
if !any.Eq(be.avarinitany) {
change = true
be.avarinitany.Copy(any)
}
if !all.Eq(be.avarinitall) {
change = true
be.avarinitall.Copy(all)
}
}
}
// Iterate through the blocks in reverse round-robin fashion. A work
// queue might be slightly faster. As is, the number of iterations is
// so low that it hardly seems to be worth the complexity.
@ -957,7 +864,7 @@ func (lv *Liveness) solve() {
newliveout.vars.Set(pos)
}
case ssa.BlockExit:
// nothing to do
// panic exit - nothing to do
default:
// A variable is live on output from this block
// if it is live on input to some successor.
@ -975,7 +882,7 @@ func (lv *Liveness) solve() {
}
// A variable is live on input to this block
// if it is live on output from this block and
// if it is used by this block, or live on output from this block and
// not set by the code in this block.
//
// in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
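The transfer function in this comment is a one-line bit-vector operation. A minimal Go sketch, representing each variable as one bit of a uint64 for illustration (the compiler itself uses growable bvec vectors):

func liveIn(uevar, liveout, varkill uint64) uint64 {
	// in[b] = uevar[b] ∪ (out[b] \ varkill[b]):
	// anything used in b is live on entry, as is anything live on exit
	// from b that b does not itself set.
	return uevar | (liveout &^ varkill)
}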
@ -990,8 +897,6 @@ func (lv *Liveness) solve() {
func (lv *Liveness) epilogue() {
nvars := int32(len(lv.vars))
liveout := varRegVec{vars: bvalloc(nvars)}
any := bvalloc(nvars)
all := bvalloc(nvars)
livedefer := bvalloc(nvars) // always-live variables
// If there is a defer (that could recover), then all output
@ -1017,6 +922,9 @@ func (lv *Liveness) epilogue() {
livedefer.Set(int32(i))
}
if n.IsOutputParamHeapAddr() {
// This variable will be overwritten early in the function
// prologue (from the result of a mallocgc) but we need to
// zero it in case that malloc causes a stack scan.
n.Name.SetNeedzero(true)
livedefer.Set(int32(i))
}
@ -1033,9 +941,6 @@ func (lv *Liveness) epilogue() {
{
// Reserve an entry for function entry.
live := bvalloc(nvars)
for _, pos := range lv.cache.textavarinit {
live.Set(pos)
}
lv.livevars = append(lv.livevars, varRegVec{vars: live})
}
@ -1043,53 +948,14 @@ func (lv *Liveness) epilogue() {
be := lv.blockEffects(b)
firstBitmapIndex := len(lv.livevars)
// Compute avarinitany and avarinitall for entry to block.
// This duplicates information known during Liveness.solve
// but avoids storing two more vectors for each block.
lv.avarinitanyall(b, any, all)
// Walk forward through the basic block instructions and
// allocate liveness maps for those instructions that need them.
// Seed the maps with information about the addrtaken variables.
for _, v := range b.Values {
pos, e := lv.valueEffects(v)
// No need for regEffects because registers never appear in avarinit.
if e&varkill != 0 {
any.Unset(pos)
all.Unset(pos)
}
if e&avarinit != 0 {
any.Set(pos)
all.Set(pos)
}
if !lv.issafepoint(v) {
continue
}
// Annotate ambiguously live variables so that they can
// be zeroed at function entry and at VARKILL points.
// liveout is dead here and used as a temporary.
liveout.vars.AndNot(any, all)
if !liveout.vars.IsEmpty() {
for pos := int32(0); pos < liveout.vars.n; pos++ {
if !liveout.vars.Get(pos) {
continue
}
all.Set(pos) // silence future warnings in this block
n := lv.vars[pos]
if !n.Name.Needzero() {
n.Name.SetNeedzero(true)
if debuglive >= 1 {
Warnl(v.Pos, "%v: %L is ambiguously live", lv.fn.Func.Nname, n)
}
}
}
}
// Live stuff first.
live := bvalloc(nvars)
live.Copy(any)
lv.livevars = append(lv.livevars, varRegVec{vars: live})
}
@ -1128,6 +994,17 @@ func (lv *Liveness) epilogue() {
Fatalf("bad index for entry point: %v", index)
}
// Check to make sure only input variables are live.
for i, n := range lv.vars {
if !liveout.vars.Get(int32(i)) {
continue
}
if n.Class() == PPARAM {
continue // ok
}
Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n)
}
// Record live variables.
live := &lv.livevars[index]
live.Or(*live, liveout)
@ -1330,28 +1207,6 @@ func clobberPtr(b *ssa.Block, v *Node, offset int64) {
b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
}
func (lv *Liveness) avarinitanyall(b *ssa.Block, any, all bvec) {
if len(b.Preds) == 0 {
any.Clear()
all.Clear()
for _, pos := range lv.cache.textavarinit {
any.Set(pos)
all.Set(pos)
}
return
}
be := lv.blockEffects(b.Preds[0].Block())
any.Copy(be.avarinitany)
all.Copy(be.avarinitall)
for _, pred := range b.Preds[1:] {
be := lv.blockEffects(pred.Block())
any.Or(any, be.avarinitany)
all.And(all, be.avarinitall)
}
}
// Compact coalesces identical bitmaps from lv.livevars into the sets
// lv.stackMapSet and lv.regMaps.
//
@ -1559,7 +1414,6 @@ func (lv *Liveness) printDebug() {
printed = false
printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0, regUevar)
printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0, regKill)
printed = lv.printeffect(printed, "avarinit", pos, effect&avarinit != 0, 0)
if printed {
fmt.Printf("\n")
}
@ -1596,9 +1450,6 @@ func (lv *Liveness) printDebug() {
printed = false
printed = lv.printbvec(printed, "varkill", be.varkill)
printed = lv.printbvec(printed, "liveout", be.liveout)
printed = lv.printbvec(printed, "avarinit", varRegVec{vars: be.avarinit})
printed = lv.printbvec(printed, "avarinitany", varRegVec{vars: be.avarinitany})
printed = lv.printbvec(printed, "avarinitall", varRegVec{vars: be.avarinitall})
if printed {
fmt.Printf("\n")
}


@ -349,15 +349,13 @@ func staticcopy(l *Node, r *Node, out *[]*Node) bool {
gdata(n, e.Expr, int(n.Type.Width))
continue
}
ll := n.copy()
ll.Orig = ll // completely separate copy
ll := n.sepcopy()
if staticassign(ll, e.Expr, out) {
continue
}
// Requires computation, but we're
// copying someone else's computation.
rr := orig.copy()
rr.Orig = rr // completely separate copy
rr := orig.sepcopy()
rr.Type = ll.Type
rr.Xoffset += e.Xoffset
setlineno(rr)
@ -453,8 +451,7 @@ func staticassign(l *Node, r *Node, out *[]*Node) bool {
continue
}
setlineno(e.Expr)
a := n.copy()
a.Orig = a // completely separate copy
a := n.sepcopy()
if !staticassign(a, e.Expr, out) {
*out = append(*out, nod(OAS, a, e.Expr))
}
@ -518,8 +515,7 @@ func staticassign(l *Node, r *Node, out *[]*Node) bool {
// Copy val directly into n.
n.Type = val.Type
setlineno(val)
a := n.copy()
a.Orig = a
a := n.sepcopy()
if !staticassign(a, val, out) {
*out = append(*out, nod(OAS, a, val))
}
@ -843,6 +839,10 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
a = nod(OAS, x, nil)
a = typecheck(a, Etop)
init.Append(a) // zero new temp
} else {
// Declare that we're about to initialize all of x.
// (Which happens at the *vauto = vstat below.)
init.Append(nod(OVARDEF, x, nil))
}
a = nod(OADDR, x, nil)
@ -853,6 +853,8 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
a = typecheck(a, Etop)
init.Append(a) // zero new temp
a = a.Left
} else {
init.Append(nod(OVARDEF, a, nil))
}
a = nod(OADDR, a, nil)
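For illustration, a sketch of source code that can reach the VARDEF branches above: a non-escaping slice literal with constant elements, whose backing-array temp is copied from the static data rather than zeroed first (the exact path taken depends on escape analysis and the literal's contents):

func f() int {
	// The temp backing this literal may be declared with OVARDEF
	// (about to be fully initialized) instead of being zeroed.
	s := []byte{1, 2, 3, 4}
	return int(s[0]) + len(s)
}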


@ -16,6 +16,7 @@ import (
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
@ -754,8 +755,8 @@ func (s *state) stmtList(l Nodes) {
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
if !(n.Op == OVARKILL || n.Op == OVARLIVE) {
// OVARKILL and OVARLIVE are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
s.pushLine(n.Pos)
defer s.popLine()
}
@ -1169,6 +1170,10 @@ func (s *state) stmt(n *Node) {
}
s.startBlock(bEnd)
case OVARDEF:
if !s.canSSA(n.Left) {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
}
case OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
@ -3149,12 +3154,12 @@ func init() {
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
},
sys.S390X)
sys.ARM64, sys.S390X)
addF("math", "Abs",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
},
sys.PPC64)
sys.ARM64, sys.PPC64)
addF("math", "Copysign",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
@ -3361,12 +3366,12 @@ func init() {
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1])
},
sys.AMD64, sys.S390X)
sys.AMD64, sys.ARM64, sys.S390X)
addF("math/bits", "RotateLeft64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1])
},
sys.AMD64, sys.S390X)
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@ -3435,6 +3440,12 @@ func init() {
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64)
addF("math/bits", "Mul64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64)
/******** sync/atomic ********/
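The Mul64 intrinsic above exposes the full 128-bit product without a runtime call. A small usage sketch, with the expected values computed by hand:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// 10 * 2^63 = 5*2^64 + 0, so the high word is 5 and the low word is 0.
	hi, lo := bits.Mul64(1<<63, 10)
	fmt.Println(hi, lo) // 5 0
}

On the architectures listed above this lowers to a widening-multiply pair, e.g. MULHDU+MULLD in the ppc64 lowering later in this diff.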
@ -4927,6 +4938,57 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
}
}
// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
type byXoffset []*Node
func (s byXoffset) Len() int { return len(s) }
func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *Progs) {
var vars []*Node
for _, n := range e.curfn.Func.Dcl {
if livenessShouldTrack(n) && n.Addrtaken() {
vars = append(vars, n)
}
}
if len(vars) == 0 {
return
}
// Sort variables from lowest to highest address.
sort.Sort(byXoffset(vars))
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
x := e.curfn.Func.lsym.Func.StackObjects
off := 0
off = duintptr(x, off, uint64(len(vars)))
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
off = duintptr(x, off, uint64(v.Xoffset))
if !typesym(v.Type).Siggen() {
Fatalf("stack object's type symbol not generated for type %s", v.Type)
}
off = dsymptr(x, off, dtypesym(v.Type), 0)
}
// Emit a funcdata pointing at the stack object data.
p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_StackObjects)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
if debuglive != 0 {
for _, v := range vars {
Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
}
}
}
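The words written by the loop above must line up with how the runtime parses the funcdata. A rough sketch of the record layout, with field names assumed for illustration (the authoritative definition is runtime/stack.go:stackObjectRecord):

// One header word (the object count), then one record per variable:
type stackObjectRecord struct {
	off int     // frame offset: >= 0 means argp-relative, < 0 means varp-relative
	typ uintptr // address of the variable's type descriptor
}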
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *Progs) {
var s SSAGenState
@ -4934,6 +4996,7 @@ func genssa(f *ssa.Func, pp *Progs) {
e := f.Frontend().(*ssafn)
s.livenessMap = liveness(e, f)
emitStackObjects(e, pp)
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
@ -5003,24 +5066,8 @@ func genssa(f *ssa.Func, pp *Progs) {
case ssa.OpGetG:
// nothing to do when there's a g register,
// and checkLower complains if there's not
case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive:
case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
// nothing to do; already used by liveness
case ssa.OpVarKill:
// Zero variable if it is ambiguously live.
// After the VARKILL anything this variable references
// might be collected. If it were to become live again later,
// the GC will see references to already-collected objects.
// See issue 20029.
n := v.Aux.(*Node)
if n.Name.Needzero() {
if n.Class() != PAUTO {
v.Fatalf("zero of variable which isn't PAUTO %v", n)
}
if n.Type.Size()%int64(Widthptr) != 0 {
v.Fatalf("zero of variable not a multiple of ptr size %v", n)
}
thearch.ZeroAuto(s.pp, n)
}
case ssa.OpPhi:
CheckLoweredPhi(v)
case ssa.OpConvert:
@ -5048,7 +5095,6 @@ func genssa(f *ssa.Func, pp *Progs) {
}
}
}
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && Debug['N'] == 0 {


@ -364,9 +364,35 @@ func nodSym(op Op, left *Node, sym *types.Sym) *Node {
return n
}
// rawcopy returns a shallow copy of n.
// Note: copy or sepcopy (rather than rawcopy) is usually the
// correct choice (see comment with Node.copy, below).
func (n *Node) rawcopy() *Node {
copy := *n
return &copy
}
// sepcopy returns a separate shallow copy of n, with the copy's
// Orig pointing to itself.
func (n *Node) sepcopy() *Node {
copy := *n
copy.Orig = &copy
return &copy
}
// copy returns a shallow copy of n and adjusts the copy's Orig if
// necessary: In general, if n.Orig points to itself, the copy's
// Orig should point to itself as well. Otherwise, if n is modified,
// the copy's Orig node appears modified, too, and then doesn't
// represent the original node anymore.
// (This caused the wrong complit Op to be used when printing error
// messages; see issues #26855, #27765).
func (n *Node) copy() *Node {
n2 := *n
return &n2
copy := *n
if n.Orig == n {
copy.Orig = &copy
}
return &copy
}
// methcmp sorts methods by symbol.
@ -412,8 +438,7 @@ func treecopy(n *Node, pos src.XPos) *Node {
switch n.Op {
default:
m := n.copy()
m.Orig = m
m := n.sepcopy()
m.Left = treecopy(n.Left, pos)
m.Right = treecopy(n.Right, pos)
m.List.Set(listtreecopy(n.List.Slice(), pos))


@ -739,6 +739,7 @@ const (
OCLOSUREVAR // variable reference at beginning of closure function
OCFUNC // reference to c function pointer (not go func value)
OCHECKNIL // emit code to ensure pointer/interface not nil
OVARDEF // variable is about to be fully initialized
OVARKILL // variable is dead
OVARLIVE // variable is alive
OINDREGSP // offset plus indirect of REGSP, such as 8(SP).


@ -2436,7 +2436,7 @@ func isMethodApplicable(t *types.Type, m *types.Field) bool {
}
func derefall(t *types.Type) *types.Type {
for t != nil && t.Etype == types.Tptr {
for t != nil && t.IsPtr() {
t = t.Elem()
}
return t
@ -2506,20 +2506,20 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
dowidth(tt)
rcvr := f2.Type.Recv().Type
if !eqtype(rcvr, tt) {
if rcvr.Etype == types.Tptr && eqtype(rcvr.Elem(), tt) {
if rcvr.IsPtr() && eqtype(rcvr.Elem(), tt) {
checklvalue(n.Left, "call pointer method on")
n.Left = nod(OADDR, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, Etype|Erv)
} else if tt.Etype == types.Tptr && rcvr.Etype != types.Tptr && eqtype(tt.Elem(), rcvr) {
} else if tt.IsPtr() && !rcvr.IsPtr() && eqtype(tt.Elem(), rcvr) {
n.Left = nod(OIND, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, Etype|Erv)
} else if tt.Etype == types.Tptr && tt.Elem().Etype == types.Tptr && eqtype(derefall(tt), derefall(rcvr)) {
} else if tt.IsPtr() && tt.Elem().IsPtr() && eqtype(derefall(tt), derefall(rcvr)) {
yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
for tt.Etype == types.Tptr {
for tt.IsPtr() {
// Stop one level early for method with pointer receiver.
if rcvr.Etype == types.Tptr && tt.Elem().Etype != types.Tptr {
if rcvr.IsPtr() && !tt.Elem().IsPtr() {
break
}
n.Left = nod(OIND, n.Left, nil)
@ -3298,7 +3298,8 @@ func samesafeexpr(l *Node, r *Node) bool {
case ODOT, ODOTPTR:
return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left)
case OIND, OCONVNOP:
case OIND, OCONVNOP,
ONOT, OCOM, OPLUS, OMINUS:
return samesafeexpr(l.Left, r.Left)
case OCONV:
@ -3306,7 +3307,8 @@ func samesafeexpr(l *Node, r *Node) bool {
// Allow only numeric-ish types. This is a bit conservative.
return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left)
case OINDEX, OINDEXMAP:
case OINDEX, OINDEXMAP,
OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
case OLITERAL:
@ -3339,7 +3341,7 @@ func typecheckas(n *Node) {
checkassign(n, n.Left)
if n.Right != nil && n.Right.Type != nil {
if n.Right.Type.IsFuncArgStruct() {
yyerror("assignment mismatch: 1 variable but %d values", n.Right.Type.NumFields())
yyerror("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields())
// Multi-value RHS isn't actually valid for OAS; nil out
// to indicate failed typechecking.
n.Right.Type = nil
@ -3484,7 +3486,12 @@ func typecheckas2(n *Node) {
}
mismatch:
yyerror("assignment mismatch: %d variables but %d values", cl, cr)
switch r.Op {
default:
yyerror("assignment mismatch: %d variable but %d values", cl, cr)
case OCALLFUNC, OCALLMETH, OCALLINTER:
yyerror("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr)
}
// second half of dance
out:
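With the switch above, a mismatch involving a call now names the callee. A sketch of a failing program and the diagnostic implied by the new format string:

func two() (int, int) { return 1, 2 }

func g() {
	a, b, c := two() // assignment mismatch: 3 variables but two returns 2 values
	_, _, _ = a, b, c
}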
@ -3640,25 +3647,22 @@ func typecheckdeftype(n *Node) {
}
func typecheckdef(n *Node) {
lno := lineno
setlineno(n)
lno := setlineno(n)
if n.Op == ONONAME {
if !n.Diag() {
n.SetDiag(true)
if n.Pos.IsKnown() {
lineno = n.Pos
}
// Note: adderrorname looks for this string and
// adds context about the outer expression
yyerror("undefined: %v", n.Sym)
yyerrorl(lineno, "undefined: %v", n.Sym)
}
lineno = lno
return
}
if n.Walkdef() == 1 {
lineno = lno
return
}
@ -3701,20 +3705,19 @@ func typecheckdef(n *Node) {
e := n.Name.Defn
n.Name.Defn = nil
if e == nil {
lineno = n.Pos
Dump("typecheckdef nil defn", n)
yyerror("xxx")
yyerrorl(n.Pos, "xxx")
}
e = typecheck(e, Erv)
if Isconst(e, CTNIL) {
yyerror("const initializer cannot be nil")
yyerrorl(n.Pos, "const initializer cannot be nil")
goto ret
}
if e.Type != nil && e.Op != OLITERAL || !e.isGoConst() {
if !e.Diag() {
yyerror("const initializer %v is not a constant", e)
yyerrorl(n.Pos, "const initializer %v is not a constant", e)
e.SetDiag(true)
}
@ -3724,12 +3727,12 @@ func typecheckdef(n *Node) {
t := n.Type
if t != nil {
if !okforconst[t.Etype] {
yyerror("invalid constant type %v", t)
yyerrorl(n.Pos, "invalid constant type %v", t)
goto ret
}
if !e.Type.IsUntyped() && !eqtype(t, e.Type) {
yyerror("cannot use %L as type %v in const initializer", e, t)
yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t)
goto ret
}


@ -1312,7 +1312,7 @@ opswitch:
b = conv(b, convType)
b = nod(OLSH, b, nodintconst(int64(8*offset)))
ncsubstr = nod(OOR, ncsubstr, b)
csubstr = csubstr | int64(s[i+offset])<<uint8(8*offset)
csubstr |= int64(s[i+offset]) << uint8(8*offset)
}
csubstrPart := nodintconst(csubstr)
// Compare "step" bytes as once
@ -1418,7 +1418,7 @@ opswitch:
// Maximum key and value size is 128 bytes; larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !Isconst(hint, CTINT) ||
!(hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) > 0) {
hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
// var bv bmap
bv := temp(bmap(t))
@ -4052,7 +4052,7 @@ func wrapCall(n *Node, init *Nodes) *Node {
// The result of substArgTypes MUST be assigned back to old, e.g.
// n.Left = substArgTypes(n.Left, t1, t2)
func substArgTypes(old *Node, types_ ...*types.Type) *Node {
n := old.copy() // make shallow copy
n := old.copy()
for _, t := range types_ {
dowidth(t)


@ -153,6 +153,24 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = y
}
case ssa.OpPPC64LoweredMuluhilo:
// MULHDU Rarg1, Rarg0, Reg0
// MULLD Rarg1, Rarg0, Reg1
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
p := s.Prog(ppc64.AMULHDU)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
p1 := s.Prog(ppc64.AMULLD)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.Reg = r0
p1.To.Type = obj.TYPE_REG
p1.To.Reg = v.Reg1()
case ssa.OpPPC64LoweredAtomicAnd8,
ssa.OpPPC64LoweredAtomicOr8:
// LWSYNC
@ -717,7 +735,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Not a go.string, generate a normal load
fallthrough
case ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
case ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
@ -739,10 +757,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
case ssa.OpPPC64MOVDloadidx, ssa.OpPPC64MOVWloadidx, ssa.OpPPC64MOVHloadidx, ssa.OpPPC64MOVWZloadidx,
ssa.OpPPC64MOVBZloadidx, ssa.OpPPC64MOVHZloadidx, ssa.OpPPC64FMOVDloadidx, ssa.OpPPC64FMOVSloadidx,
ssa.OpPPC64MOVDBRloadidx, ssa.OpPPC64MOVWBRloadidx, ssa.OpPPC64MOVHBRloadidx:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.From.Index = v.Args[1].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -755,17 +776,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
ssa.OpPPC64MOVHBRstoreidx:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.From.Reg = v.Args[2].Reg()
p.To.Index = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)


@ -44,8 +44,8 @@
(Xor(32|16|8) x y) -> (XORL x y)
(Neg(32|16|8) x) -> (NEGL x)
(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
(Neg32F x) && config.use387 -> (FCHS x)
(Neg64F x) && config.use387 -> (FCHS x)
@ -1116,10 +1116,10 @@
(XORL x x) -> (MOVLconst [0])
// checking AND against 0.
(CMP(L|W|B)const (ANDL x y) [0]) -> (TEST(L|W|B) x y)
(CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
(CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
(CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
(CMP(L|W|B)const l:(ANDL x y) [0]) && l.Uses==1 -> (TEST(L|W|B) x y)
(CMPLconst l:(ANDLconst [c] x) [0]) && l.Uses==1 -> (TESTLconst [c] x)
(CMPWconst l:(ANDLconst [c] x) [0]) && l.Uses==1 -> (TESTWconst [int64(int16(c))] x)
(CMPBconst l:(ANDLconst [c] x) [0]) && l.Uses==1 -> (TESTBconst [int64(int8(c))] x)
// TEST %reg,%reg is shorter than CMP
(CMP(L|W|B)const x [0]) -> (TEST(L|W|B) x x)
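The added l.Uses==1 conditions ensure the AND's only consumer is the comparison, so the flag-setting TEST can replace it outright instead of running alongside it. A sketch of the Go shape these rules target:

func anyCommonBits(x, y uint32) bool {
	// x&y has a single use (the comparison with zero), so
	// CMPLconst (ANDL x y) [0] can become TESTL x y.
	return x&y != 0
}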


@ -41,8 +41,8 @@
(Com(64|32|16|8) x) -> (NOT(Q|L|L|L) x)
(Neg(64|32|16|8) x) -> (NEG(Q|L|L|L) x)
(Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
(Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
(Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
(Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
// Lowering boolean ops
(AndB x y) -> (ANDL x y)
@ -709,7 +709,17 @@
(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
(AND(L|Q)const [c] (AND(L|Q)const [d] x)) -> (AND(L|Q)const [c & d] x)
(BTR(L|Q)const [c] (AND(L|Q)const [d] x)) -> (AND(L|Q)const [d &^ (1<<uint32(c))] x)
(AND(L|Q)const [c] (BTR(L|Q)const [d] x)) -> (AND(L|Q)const [c &^ (1<<uint32(d))] x)
(BTR(L|Q)const [c] (BTR(L|Q)const [d] x)) -> (AND(L|Q)const [^(1<<uint32(c) | 1<<uint32(d))] x)
(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) -> (XOR(L|Q)const [c ^ d] x)
(BTC(L|Q)const [c] (XOR(L|Q)const [d] x)) -> (XOR(L|Q)const [d ^ 1<<uint32(c)] x)
(XOR(L|Q)const [c] (BTC(L|Q)const [d] x)) -> (XOR(L|Q)const [c ^ 1<<uint32(d)] x)
(BTC(L|Q)const [c] (BTC(L|Q)const [d] x)) -> (XOR(L|Q)const [1<<uint32(c) ^ 1<<uint32(d)] x)
(OR(L|Q)const [c] (OR(L|Q)const [d] x)) -> (OR(L|Q)const [c | d] x)
(OR(L|Q)const [c] (BTS(L|Q)const [d] x)) -> (OR(L|Q)const [c | 1<<uint32(d)] x)
(BTS(L|Q)const [c] (OR(L|Q)const [d] x)) -> (OR(L|Q)const [d | 1<<uint32(c)] x)
(BTS(L|Q)const [c] (BTS(L|Q)const [d] x)) -> (OR(L|Q)const [1<<uint32(d) | 1<<uint32(c)] x)
(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
@ -1042,18 +1052,23 @@
((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
(CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
(CMP(Q|L|W|B)constload [ValAndOff(valoff1).add(off2)] {sym} base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {sym} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {sym} base val mem)
// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
@ -1088,24 +1103,31 @@
((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
(CMP(Q|L|W|B)constload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
// generating indexed loads and stores
(MOV(B|W|L|Q|SS|SD)load [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
@ -1412,6 +1434,12 @@
(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
(NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
(NOTL (MOVLconst [c])) -> (MOVLconst [^c])
(BTSQconst [c] (MOVQconst [d])) -> (MOVQconst [d|(1<<uint32(c))])
(BTSLconst [c] (MOVLconst [d])) -> (MOVLconst [d|(1<<uint32(c))])
(BTRQconst [c] (MOVQconst [d])) -> (MOVQconst [d&^(1<<uint32(c))])
(BTRLconst [c] (MOVLconst [d])) -> (MOVLconst [d&^(1<<uint32(c))])
(BTCQconst [c] (MOVQconst [d])) -> (MOVQconst [d^(1<<uint32(c))])
(BTCLconst [c] (MOVLconst [d])) -> (MOVLconst [d^(1<<uint32(c))])
// generic simplifications
// TODO: more of this
@ -2292,11 +2320,11 @@
((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off] {sym} ptr x mem)
(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off] {sym} ptr x mem)
// Merge ADDQconst and LEAQ into atomic loads.
(MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
@ -2380,12 +2408,12 @@
(MOVWQZX (MOVBQZX x)) -> (MOVBQZX x)
(MOVBQZX (MOVBQZX x)) -> (MOVBQZX x)
(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) ->
((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) ->
((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
// float <-> int register moves, with no conversion.
// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.


@ -272,14 +272,14 @@ func init() {
{name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32
{name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64
{name: "BTL", argLength: 2, reg: gp2flags, asm: "BTL", typ: "Flags"}, // test whether bit arg0 % 32 in arg1 is set
{name: "BTQ", argLength: 2, reg: gp2flags, asm: "BTQ", typ: "Flags"}, // test whether bit arg0 % 64 in arg1 is set
{name: "BTCL", argLength: 2, reg: gp21, asm: "BTCL", resultInArg0: true, clobberFlags: true}, // complement bit arg0 % 32 in arg1
{name: "BTCQ", argLength: 2, reg: gp21, asm: "BTCQ", resultInArg0: true, clobberFlags: true}, // complement bit arg0 % 64 in arg1
{name: "BTRL", argLength: 2, reg: gp21, asm: "BTRL", resultInArg0: true, clobberFlags: true}, // reset bit arg0 % 32 in arg1
{name: "BTRQ", argLength: 2, reg: gp21, asm: "BTRQ", resultInArg0: true, clobberFlags: true}, // reset bit arg0 % 64 in arg1
{name: "BTSL", argLength: 2, reg: gp21, asm: "BTSL", resultInArg0: true, clobberFlags: true}, // set bit arg0 % 32 in arg1
{name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg0 % 64 in arg1
{name: "BTL", argLength: 2, reg: gp2flags, asm: "BTL", typ: "Flags"}, // test whether bit arg0%32 in arg1 is set
{name: "BTQ", argLength: 2, reg: gp2flags, asm: "BTQ", typ: "Flags"}, // test whether bit arg0%64 in arg1 is set
{name: "BTCL", argLength: 2, reg: gp21, asm: "BTCL", resultInArg0: true, clobberFlags: true}, // complement bit arg1%32 in arg0
{name: "BTCQ", argLength: 2, reg: gp21, asm: "BTCQ", resultInArg0: true, clobberFlags: true}, // complement bit arg1%64 in arg0
{name: "BTRL", argLength: 2, reg: gp21, asm: "BTRL", resultInArg0: true, clobberFlags: true}, // reset bit arg1%32 in arg0
{name: "BTRQ", argLength: 2, reg: gp21, asm: "BTRQ", resultInArg0: true, clobberFlags: true}, // reset bit arg1%64 in arg0
{name: "BTSL", argLength: 2, reg: gp21, asm: "BTSL", resultInArg0: true, clobberFlags: true}, // set bit arg1%32 in arg0
{name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg1%64 in arg0
{name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32
{name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64
{name: "BTCLconst", argLength: 1, reg: gp11, asm: "BTCL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 32
@ -289,6 +289,20 @@ func init() {
{name: "BTSLconst", argLength: 1, reg: gp11, asm: "BTSL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 32
{name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 64
// direct bit operation on memory operand
{name: "BTCQmodify", argLength: 3, reg: gpstore, asm: "BTCQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
{name: "BTCLmodify", argLength: 3, reg: gpstore, asm: "BTCL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
{name: "BTSQmodify", argLength: 3, reg: gpstore, asm: "BTSQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
{name: "BTSLmodify", argLength: 3, reg: gpstore, asm: "BTSL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
{name: "BTRQmodify", argLength: 3, reg: gpstore, asm: "BTRQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
{name: "BTRLmodify", argLength: 3, reg: gpstore, asm: "BTRL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
{name: "BTCQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "BTCLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "BTSQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "BTSLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "BTRQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "BTRLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0


@ -83,12 +83,18 @@
(Com8 x) -> (MVN x)
// math package intrinsics
(Abs x) -> (FABSD x)
(Sqrt x) -> (FSQRTD x)
(Ceil x) -> (FRINTPD x)
(Floor x) -> (FRINTMD x)
(Round x) -> (FRINTAD x)
(RoundToEven x) -> (FRINTND x)
(Trunc x) -> (FRINTZD x)
// lowering rotates
(RotateLeft32 x y) -> (RORW x (NEG <y.Type> y))
(RotateLeft64 x y) -> (ROR x (NEG <y.Type> y))
(Ctz64NonZero x) -> (Ctz64 x)
(Ctz32NonZero x) -> (Ctz32 x)
@ -101,9 +107,20 @@
// Load args directly into the register class where it will be used.
(FMOVDgpfp <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
(FMOVDfpgp <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
// Similarly for stores: if we see a store after an FPR <-> GPR move, redirect the store to use the other register set.
(MOVDstore ptr (FMOVDfpgp val) mem) -> (FMOVDstore ptr val mem)
(FMOVDstore ptr (FMOVDgpfp val) mem) -> (MOVDstore ptr val mem)
(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) -> (FMOVDstore [off] {sym} ptr val mem)
(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) -> (MOVDstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) -> (FMOVSstore [off] {sym} ptr val mem)
(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) -> (MOVWstore [off] {sym} ptr val mem)
// float <-> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) -> (FMOVDfpgp val)
(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) -> (FMOVDgpfp val)
(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) -> (FMOVSfpgp val)
(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) -> (FMOVSgpfp val)
(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
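As the comment above says, these moves come up for the math bit-cast helpers. A small sketch:

import "math"

// With the rules above, the bit pattern moves FPR->GPR in a register
// instead of round-tripping through memory.
func bitsOf(f float64) uint64 {
	return math.Float64bits(f)
}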
@ -125,6 +142,8 @@
// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
// The rules for rotates with non-constant shifts are based on the shift rules below;
// if those rules change, please also modify the rotate rules derived from them.
(Lsh64x64 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh64x32 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@ -1135,15 +1154,15 @@
(MULW (NEG x) y) -> (MNEGW x y)
// madd/msub
(ADD a l:(MUL x y)) && l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MADD a x y)
(SUB a l:(MUL x y)) && l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MSUB a x y)
(ADD a l:(MNEG x y)) && l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MSUB a x y)
(SUB a l:(MNEG x y)) && l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MADD a x y)
(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) -> (MADD a x y)
(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) -> (MSUB a x y)
(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) -> (MSUB a x y)
(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) -> (MADD a x y)
(ADD a l:(MULW x y)) && l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MADDW a x y)
(SUB a l:(MULW x y)) && l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MSUBW a x y)
(ADD a l:(MNEGW x y)) && l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MSUBW a x y)
(SUB a l:(MNEGW x y)) && l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MADDW a x y)
(ADD a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MADDW a x y)
(SUB a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MSUBW a x y)
(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MSUBW a x y)
(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MADDW a x y)
// mul by constant
(MUL x (MOVDconst [-1])) -> (NEG x)
@ -1191,6 +1210,94 @@
(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MADD a x (MOVDconst [-1])) -> (SUB a x)
(MADD a _ (MOVDconst [0])) -> a
(MADD a x (MOVDconst [1])) -> (ADD a x)
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 -> (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 -> (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADD a (MOVDconst [-1]) x) -> (SUB a x)
(MADD a (MOVDconst [0]) _) -> a
(MADD a (MOVDconst [1]) x) -> (ADD a x)
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 -> (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 -> (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADDW a x (MOVDconst [c])) && int32(c)==-1 -> (SUB a x)
(MADDW a _ (MOVDconst [c])) && int32(c)==0 -> a
(MADDW a x (MOVDconst [c])) && int32(c)==1 -> (ADD a x)
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)])
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 -> (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 -> (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADDW a (MOVDconst [c]) x) && int32(c)==-1 -> (SUB a x)
(MADDW a (MOVDconst [c]) _) && int32(c)==0 -> a
(MADDW a (MOVDconst [c]) x) && int32(c)==1 -> (ADD a x)
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)])
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 -> (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 -> (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUB a x (MOVDconst [-1])) -> (ADD a x)
(MSUB a _ (MOVDconst [0])) -> a
(MSUB a x (MOVDconst [1])) -> (SUB a x)
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 -> (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 -> (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUB a (MOVDconst [-1]) x) -> (ADD a x)
(MSUB a (MOVDconst [0]) _) -> a
(MSUB a (MOVDconst [1]) x) -> (SUB a x)
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 -> (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 -> (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUBW a x (MOVDconst [c])) && int32(c)==-1 -> (ADD a x)
(MSUBW a _ (MOVDconst [c])) && int32(c)==0 -> a
(MSUBW a x (MOVDconst [c])) && int32(c)==1 -> (SUB a x)
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)])
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 -> (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 -> (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 -> (ADD a x)
(MSUBW a (MOVDconst [c]) _) && int32(c)==0 -> a
(MSUBW a (MOVDconst [c]) x) && int32(c)==1 -> (SUB a x)
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)])
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 -> (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 -> (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
// div by constant
(UDIV x (MOVDconst [1])) -> x
(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x)
@ -1242,6 +1349,14 @@
(MULW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)*int32(d))])
(MNEG (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [-c*d])
(MNEGW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [-int64(int32(c)*int32(d))])
(MADD (MOVDconst [c]) x y) -> (ADDconst [c] (MUL <x.Type> x y))
(MADDW (MOVDconst [c]) x y) -> (ADDconst [c] (MULW <x.Type> x y))
(MSUB (MOVDconst [c]) x y) -> (ADDconst [c] (MNEG <x.Type> x y))
(MSUBW (MOVDconst [c]) x y) -> (ADDconst [c] (MNEGW <x.Type> x y))
(MADD a (MOVDconst [c]) (MOVDconst [d])) -> (ADDconst [c*d] a)
(MADDW a (MOVDconst [c]) (MOVDconst [d])) -> (ADDconst [int64(int32(c)*int32(d))] a)
(MSUB a (MOVDconst [c]) (MOVDconst [d])) -> (SUBconst [c*d] a)
(MSUBW a (MOVDconst [c]) (MOVDconst [d])) -> (SUBconst [int64(int32(c)*int32(d))] a)
(DIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c/d])
(UDIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)/uint64(d))])
(DIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)/int32(d))])
@ -1490,6 +1605,12 @@
(CSEL0 {arm64Negate(bool.Op)} x flagArg(bool))
// absorb shifts into ops
(NEG x:(SLLconst [c] y)) && clobberIfDead(x) -> (NEGshiftLL [c] y)
(NEG x:(SRLconst [c] y)) && clobberIfDead(x) -> (NEGshiftRL [c] y)
(NEG x:(SRAconst [c] y)) && clobberIfDead(x) -> (NEGshiftRA [c] y)
(MVN x:(SLLconst [c] y)) && clobberIfDead(x) -> (MVNshiftLL [c] y)
(MVN x:(SRLconst [c] y)) && clobberIfDead(x) -> (MVNshiftRL [c] y)
(MVN x:(SRAconst [c] y)) && clobberIfDead(x) -> (MVNshiftRA [c] y)
(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftLL x0 y [c])
(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRL x0 y [c])
(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRA x0 y [c])
@ -1520,6 +1641,12 @@
(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRL x1 y [c]))
(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRA x0 y [c])
(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRA x1 y [c]))
(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftLL x0 y [c])
(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRL x0 y [c])
(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRA x0 y [c])
(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftLL x0 y [c])
(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRL x0 y [c])
(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRA x0 y [c])
// prefer *const ops to *shift ops
(ADDshiftLL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
@ -1537,8 +1664,20 @@
(CMPshiftLL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
(CMPshiftRL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
(CMPshiftRA (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
(CMNshiftLL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SLLconst <x.Type> x [d]))
(CMNshiftRL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRLconst <x.Type> x [d]))
(CMNshiftRA (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRAconst <x.Type> x [d]))
(TSTshiftLL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SLLconst <x.Type> x [d]))
(TSTshiftRL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRLconst <x.Type> x [d]))
(TSTshiftRA (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRAconst <x.Type> x [d]))
// constant folding in *shift ops
(MVNshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)<<uint64(d))])
(MVNshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)>>uint64(d))])
(MVNshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [^(c>>uint64(d))])
(NEGshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)<<uint64(d))])
(NEGshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)>>uint64(d))])
(NEGshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [-(c>>uint64(d))])
(ADDshiftLL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)<<uint64(d))])
(ADDshiftRL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)>>uint64(d))])
(ADDshiftRA x (MOVDconst [c]) [d]) -> (ADDconst x [c>>uint64(d)])
@ -1566,6 +1705,12 @@
(CMPshiftLL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)<<uint64(d))])
(CMPshiftRL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)>>uint64(d))])
(CMPshiftRA x (MOVDconst [c]) [d]) -> (CMPconst x [c>>uint64(d)])
(CMNshiftLL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)<<uint64(d))])
(CMNshiftRL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)>>uint64(d))])
(CMNshiftRA x (MOVDconst [c]) [d]) -> (CMNconst x [c>>uint64(d)])
(TSTshiftLL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)<<uint64(d))])
(TSTshiftRL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)>>uint64(d))])
(TSTshiftRA x (MOVDconst [c]) [d]) -> (TSTconst x [c>>uint64(d)])
// simplification with *shift ops
(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
@ -1590,7 +1735,7 @@
(ORNshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
(ORNshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [-1])
// Generate rotates
// Generate rotates with const shift
(ADDshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
( ORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
(XORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
@ -1608,6 +1753,38 @@
( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
(RORconst [c] (RORconst [d] x)) -> (RORconst [(c+d)&63] x)
(RORWconst [c] (RORWconst [d] x)) -> (RORWconst [(c+d)&31] x)
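// For illustration (a sketch): on a uint64, the Go expression
//	x<<10 | x>>54
// matches the ORshiftLL rule above and becomes a single RORconst [54] x,
// that is, a rotate left by 10.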
// Generate rotates with non-const shift.
// These rules match the Go source code like
// y &= 63
// x << y | x >> (64-y)
// "|" can also be "^" or "+".
// As arm64 does not have a ROL instruction, ROL(x, y) is replaced by ROR(x, -y).
((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
(CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
(CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc.(Op) == OpARM64LessThanU
-> (ROR x (NEG <t> y))
((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
(CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
(CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc.(Op) == OpARM64LessThanU
-> (ROR x y)
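// For illustration (a sketch; rotl64 is a made-up name):
//	func rotl64(x uint64, y uint) uint64 {
//		y &= 63
//		return x<<y | x>>(64-y)
//	}
// matches the first pattern above (the CSEL0 guards the x>>(64-y) term,
// which Go defines as 0 when the count reaches 64) and compiles to a
// single ROR x, (NEG y).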
// These rules match the Go source code like
// y &= 31
// x << y | x >> (32-y)
// "|" can also be "^" or "+".
// As arm64 does not have a ROLW instruction, ROLW(x, y) is replaced by RORW(x, -y).
((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
(CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
(CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc.(Op) == OpARM64LessThanU
-> (RORW x (NEG <t> y))
((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
(CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
(CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc.(Op) == OpARM64LessThanU
-> (RORW x y)
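// For illustration (a sketch): the analogous 32-bit pattern, y &= 31
// followed by x<<y | x>>(32-y) on a uint32, lowers the same way to
// RORW x, (NEG y).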
// Extract from reg pair
(ADDshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
( ORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
@ -1626,6 +1803,9 @@
(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
// Special case: setting a bit to 1. An example is math.Copysign(c, -1).
(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 -> (ORconst [c1] x)
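// For illustration: math.Copysign(c, -1) lowers to
//	(ORconst [1<<63] (ANDconst [1<<63-1] x))
// and since the two masks together cover all 64 bits, the ANDconst is
// dropped by the rule above.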
// bitfield ops
// sbfiz

View file

@ -212,6 +212,7 @@ func init() {
// unary ops
{name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
{name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD"}, // abs(arg0), float64
{name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32
{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64
{name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64
@ -248,6 +249,8 @@ func init() {
{name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned
{name: "SRA", argLength: 2, reg: gp21, asm: "ASR"}, // arg0 >> arg1, signed, shift amount is mod 64
{name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed
{name: "ROR", argLength: 2, reg: gp21, asm: "ROR"}, // arg0 right rotate by (arg1 mod 64) bits
{name: "RORW", argLength: 2, reg: gp21, asm: "RORW"}, // arg0 right rotate by (arg1 mod 32) bits
{name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits
{name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits
{name: "EXTRconst", argLength: 2, reg: gp21, asm: "EXTR", aux: "Int64"}, // extract 64 bits from arg0:arg1 starting at lsb auxInt
@ -270,6 +273,12 @@ func init() {
{name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64
// shifted ops
{name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<<auxInt)
{name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), unsigned shift
{name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift
{name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<<auxInt)
{name: "NEGshiftRL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), unsigned shift
{name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift
{name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<<auxInt
{name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, unsigned shift
{name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift
@ -297,6 +306,12 @@ func init() {
{name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<<auxInt
{name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
{name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
{name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<<auxInt) compare to 0
{name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, unsigned shift
{name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift
{name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<<auxInt) compare to 0
{name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, unsigned shift
{name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift
// bitfield ops
// for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff
@ -388,6 +403,8 @@ func init() {
{name: "FMOVDgpfp", argLength: 1, reg: gpfp, asm: "FMOVD"}, // move int64 to float64 (no conversion)
{name: "FMOVDfpgp", argLength: 1, reg: fpgp, asm: "FMOVD"}, // move float64 to int64 (no conversion)
{name: "FMOVSgpfp", argLength: 1, reg: gpfp, asm: "FMOVS"}, // move 32bits from int to float reg (no conversion)
{name: "FMOVSfpgp", argLength: 1, reg: fpgp, asm: "FMOVS"}, // move 32bits from float to int reg, zero extend (no conversion)
// conversions
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
@ -422,6 +439,7 @@ func init() {
// floating-point round to integral
{name: "FRINTAD", argLength: 1, reg: fp11, asm: "FRINTAD"},
{name: "FRINTMD", argLength: 1, reg: fp11, asm: "FRINTMD"},
{name: "FRINTND", argLength: 1, reg: fp11, asm: "FRINTND"},
{name: "FRINTPD", argLength: 1, reg: fp11, asm: "FRINTPD"},
{name: "FRINTZD", argLength: 1, reg: fp11, asm: "FRINTZD"},

View file

@ -25,6 +25,7 @@
(Mul64 x y) -> (MULLD x y)
(Mul(32|16|8) x y) -> (MULLW x y)
(Mul64uhilo x y) -> (LoweredMuluhilo x y)
(Div64 x y) -> (DIVD x y)
(Div64u x y) -> (DIVDU x y)
@ -74,11 +75,11 @@
(ConstBool [b]) -> (MOVDconst [b])
// Constant folding
(FABS (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Abs(i2f(x)))])
(FSQRT (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Sqrt(i2f(x)))])
(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Floor(i2f(x)))])
(FCEIL (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Ceil(i2f(x)))])
(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Trunc(i2f(x)))])
(FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
(FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
// Rotate generation with const shift
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
@ -168,6 +169,20 @@
(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
// Lower bounded shifts first. No need to check shift value.
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLD x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRD x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVHZreg x) y)
(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVBZreg x) y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAD x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVHreg x) y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVBreg x) y)
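// For illustration (a sketch; assumes the prove pass can establish the bound):
//	func shl(x uint64, y uint) uint64 { return x << (y & 63) }
// has a shift count provably below 64, so shiftIsBounded reports true and
// a bare SLD is emitted with no test of the shift value.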
// non-constant rotates
// These are subexpressions found in statements that can become rotates
// In these cases the shift count is known to be < 64 so the more complicated expressions
@ -660,14 +675,51 @@
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y
// small and of zero-extend -> either zero-extend or small and
// degenerate-and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
(ANDconst [0xFF] y:(MOVBreg _)) -> y
(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF -> y
(ANDconst [c] y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF -> y
// normal case
(ANDconst [c] (MOVBZreg x)) -> (ANDconst [c&0xFF] x)
(ANDconst [c] (MOVHZreg x)) -> (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOVWZreg x)) -> (ANDconst [c&0xFFFFFFFF] x)
(ANDconst [0xFFFF] y:(MOVHreg _)) -> y
(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF -> y
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) -> (MOVWZreg x)
// normal case
(ANDconst [c] (MOV(B|BZ)reg x)) -> (ANDconst [c&0xFF] x)
(ANDconst [c] (MOV(H|HZ)reg x)) -> (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOV(W|WZ)reg x)) -> (ANDconst [c&0xFFFFFFFF] x)
// Eliminate unnecessary sign/zero extend following right shift
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) -> (SRWconst [c] (MOVBZreg x))
(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) -> (SRWconst [c] (MOVHZreg x))
(MOVWZreg (SRWconst [c] (MOVWZreg x))) -> (SRWconst [c] (MOVWZreg x))
(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) -> (SRAWconst [c] (MOVBreg x))
(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) -> (SRAWconst [c] (MOVHreg x))
(MOVWreg (SRAWconst [c] (MOVWreg x))) -> (SRAWconst [c] (MOVWreg x))
(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRWconst [c] x)
(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRWconst [c] x)
(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 -> (SRWconst [c] x)
(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRAWconst [c] x)
(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRAWconst [c] x)
(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 -> (SRAWconst [c] x)
// initial right shift will handle sign/zero extend
(MOVBZreg (SRDconst [c] x)) && c>=56 -> (SRDconst [c] x)
(MOVBreg (SRDconst [c] x)) && c>56 -> (SRDconst [c] x)
(MOVBreg (SRDconst [c] x)) && c==56 -> (SRADconst [c] x)
(MOVBZreg (SRWconst [c] x)) && c>=24 -> (SRWconst [c] x)
(MOVBreg (SRWconst [c] x)) && c>24 -> (SRWconst [c] x)
(MOVBreg (SRWconst [c] x)) && c==24 -> (SRAWconst [c] x)
(MOVHZreg (SRDconst [c] x)) && c>=48 -> (SRDconst [c] x)
(MOVHreg (SRDconst [c] x)) && c>48 -> (SRDconst [c] x)
(MOVHreg (SRDconst [c] x)) && c==48 -> (SRADconst [c] x)
(MOVHZreg (SRWconst [c] x)) && c>=16 -> (SRWconst [c] x)
(MOVHreg (SRWconst [c] x)) && c>16 -> (SRWconst [c] x)
(MOVHreg (SRWconst [c] x)) && c==16 -> (SRAWconst [c] x)
(MOVWZreg (SRDconst [c] x)) && c>=32 -> (SRDconst [c] x)
(MOVWreg (SRDconst [c] x)) && c>32 -> (SRDconst [c] x)
(MOVWreg (SRDconst [c] x)) && c==32 -> (SRADconst [c] x)
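// For illustration: in (MOVBZreg (SRDconst [56] x)) the shift already
// leaves only the low 8 bits, zero-filled, so the zero-extension is
// dropped; in the signed case at exactly c==56 the rule switches to
// SRADconst so the top byte is sign-extended instead.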
// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) -> y // repeat
@ -796,11 +848,19 @@
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)
// Loads with addressing that can be done as a register indexed load
(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 -> (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
// Indexed loads with constant index values can be done without indexed instructions
(MOV(D|W|WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
(MOV(D|W|WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
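// For illustration (a sketch; f is a made-up name): in
//	func f(b []byte, i int) byte { return b[i] }
// the load can be emitted as MOVBZloadidx base, index rather than first
// adding the index into the base register.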
// Store of zero -> storezero
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVDstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVWstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVHstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVBstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
// Fold offsets for storezero
(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
@ -812,6 +872,13 @@
(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
(MOVBstorezero [off1+off2] {sym} x mem)
// Stores with addressing that can be done as indexed stores
(MOV(D|W|H|B)store [off] {sym} p:(ADD ptr idx) val mem) && off == 0 && sym == nil && p.Uses == 1 -> (MOV(D|W|H|B)storeidx ptr idx val mem)
// Stores with constant index values can be done without indexed instructions
(MOV(D|W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
(MOV(D|W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
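// For illustration: this mirrors the load case above; a store through
// p:(ADD ptr idx) becomes a storeidx op, and a small constant index folds
// back into the plain offset form.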
// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
&& (x.Op != OpSB || p.Uses == 1) ->
@ -851,22 +918,43 @@
(ZeroExt16to(32|64) x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)
(Trunc(16|32|64)to8 x) -> (MOVBreg x)
(Trunc(32|64)to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)
(Trunc(16|32|64)to8 x) && isSigned(x.Type) -> (MOVBreg x)
(Trunc(16|32|64)to8 x) -> (MOVBZreg x)
(Trunc(32|64)to16 x) && isSigned(x.Type) -> (MOVHreg x)
(Trunc(32|64)to16 x) -> (MOVHZreg x)
(Trunc64to32 x) && isSigned(x.Type) -> (MOVWreg x)
(Trunc64to32 x) -> (MOVWZreg x)
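// For illustration: truncating an int64 to int8 now emits MOVBreg (sign
// extend), while a uint64 to uint8 emits MOVBZreg (zero extend), chosen by
// isSigned on the operand's type.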
(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
(MOVBZreg x:(MOVBZload _ _)) -> x
(MOVHZreg x:(MOVHZload _ _)) -> x
(MOVHreg x:(MOVHload _ _)) -> x
(MOV(B|H|W)Zreg x:(MOVBZload _ _)) -> x
(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) -> x
(MOV(H|W)Zreg x:(MOVHZload _ _)) -> x
(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) -> x
(MOV(H|W)reg x:(MOVHload _ _)) -> x
(MOV(H|W)reg x:(MOVHloadidx _ _ _)) -> x
(MOVWZreg x:(MOVWZload _ _)) -> x
(MOVWZreg x:(MOVWZloadidx _ _ _)) -> x
(MOVWreg x:(MOVWload _ _)) -> x
(MOVWreg x:(MOVWloadidx _ _ _)) -> x
// don't extend if argument is already extended
(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> x
(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> x
(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> x
(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> x
(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> x
(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> x
(MOVBZreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
(MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
@ -874,6 +962,11 @@
(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstoreidx [off] {sym} ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstoreidx [off] {sym} ptr idx x mem)
(MOVHstoreidx [off] {sym} ptr idx (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstoreidx [off] {sym} ptr idx x mem)
(MOVWstoreidx [off] {sym} ptr idx (MOV(W|WZ)reg x) mem) -> (MOVWstoreidx [off] {sym} ptr idx x mem)
(MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
(MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHBRstore {sym} ptr x mem)
(MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWBRstore {sym} ptr x mem)

View file

@ -135,11 +135,14 @@ func init() {
gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
gp22 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
gp1cr = regInfo{inputs: []regMask{gp | sp | sb}}
gp2cr = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
crgp = regInfo{inputs: nil, outputs: []regMask{gp}}
gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
gploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
gpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}}
gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value
gpxchg = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
gpcas = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}}
@ -151,7 +154,9 @@ func init() {
fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
fp2cr = regInfo{inputs: []regMask{fp, fp}}
fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}}
fploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{fp}}
fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}}
callerSave = regMask(gp | fp | gr)
)
ops := []opData{
@ -170,6 +175,7 @@ func init() {
{name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed
{name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned
{name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
{name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, returns (hi, lo)
{name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1
{name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1
@ -281,6 +287,19 @@ func init() {
{name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend reverse order
{name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend reverse order
// In these cases an index register is used in addition to a base register
{name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // zero extend uint8 to uint64
{name: "MOVHloadidx", argLength: 3, reg: gploadidx, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // sign extend int16 to int64
{name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // zero extend uint16 to uint64
{name: "MOVWloadidx", argLength: 3, reg: gploadidx, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // sign extend int32 to int64
{name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // zero extend uint32 to uint64
{name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"},
{name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // sign extend int16 to int64
{name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // sign extend int32 to int64
{name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"},
{name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"},
{name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"},
// Store bytes in the reverse endian order of the arch into arg0.
// These are indexed stores with no offset field in the instruction so the aux fields are not used.
{name: "MOVDBRstore", argLength: 3, reg: gpstore, asm: "MOVDBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes reverse order
@ -301,6 +320,17 @@ func init() {
{name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double flot
{name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float
// Stores using index and base registers
{name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store bye
{name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store half word
{name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store word
{name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double word
{name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double float
{name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float
{name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store half word reversed byte using index reg
{name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store word reversed byte using index reg
{name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double word reversed byte using index reg
// The following ops store 0 into arg0+aux+auxint; arg1=mem
{name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 1 byte
{name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes

View file

@ -363,8 +363,8 @@
(I64And (I64Const [x]) (I64Const [y])) -> (I64Const [x & y])
(I64Or (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
(I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
(F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [f2i(i2f(x) + i2f(y))])
(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [f2i(i2f(x) * i2f(y))])
(F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
(I64Eq (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
(I64Eq (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
(I64Ne (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])

View file

@ -44,16 +44,16 @@
(Trunc64to8 (Const64 [c])) -> (Const8 [int64(int8(c))])
(Trunc64to16 (Const64 [c])) -> (Const16 [int64(int16(c))])
(Trunc64to32 (Const64 [c])) -> (Const32 [int64(int32(c))])
(Cvt64Fto32F (Const64F [c])) -> (Const32F [f2i(float64(i2f32(c)))])
(Cvt64Fto32F (Const64F [c])) -> (Const32F [auxFrom32F(float32(auxTo64F(c)))])
(Cvt32Fto64F (Const32F [c])) -> (Const64F [c]) // c is already a 64 bit float
(Cvt32to32F (Const32 [c])) -> (Const32F [f2i(float64(float32(int32(c))))])
(Cvt32to64F (Const32 [c])) -> (Const64F [f2i(float64(int32(c)))])
(Cvt64to32F (Const64 [c])) -> (Const32F [f2i(float64(float32(c)))])
(Cvt64to64F (Const64 [c])) -> (Const64F [f2i(float64(c))])
(Cvt32Fto32 (Const32F [c])) -> (Const32 [int64(int32(i2f(c)))])
(Cvt32Fto64 (Const32F [c])) -> (Const64 [int64(i2f(c))])
(Cvt64Fto32 (Const64F [c])) -> (Const32 [int64(int32(i2f(c)))])
(Cvt64Fto64 (Const64F [c])) -> (Const64 [int64(i2f(c))])
(Cvt32to32F (Const32 [c])) -> (Const32F [auxFrom32F(float32(int32(c)))])
(Cvt32to64F (Const32 [c])) -> (Const64F [auxFrom64F(float64(int32(c)))])
(Cvt64to32F (Const64 [c])) -> (Const32F [auxFrom32F(float32(c))])
(Cvt64to64F (Const64 [c])) -> (Const64F [auxFrom64F(float64(c))])
(Cvt32Fto32 (Const32F [c])) -> (Const32 [int64(int32(auxTo32F(c)))])
(Cvt32Fto64 (Const32F [c])) -> (Const64 [int64(auxTo32F(c))])
(Cvt64Fto32 (Const64F [c])) -> (Const32 [int64(int32(auxTo64F(c)))])
(Cvt64Fto64 (Const64F [c])) -> (Const64 [int64(auxTo64F(c))])
(Round32F x:(Const32F)) -> x
(Round64F x:(Const64F)) -> x
@ -95,16 +95,15 @@
(Neg16 (Const16 [c])) -> (Const16 [int64(-int16(c))])
(Neg32 (Const32 [c])) -> (Const32 [int64(-int32(c))])
(Neg64 (Const64 [c])) -> (Const64 [-c])
(Neg32F (Const32F [c])) && i2f(c) != 0 -> (Const32F [f2i(-i2f(c))])
(Neg64F (Const64F [c])) && i2f(c) != 0 -> (Const64F [f2i(-i2f(c))])
(Neg32F (Const32F [c])) && auxTo32F(c) != 0 -> (Const32F [auxFrom32F(-auxTo32F(c))])
(Neg64F (Const64F [c])) && auxTo64F(c) != 0 -> (Const64F [auxFrom64F(-auxTo64F(c))])
(Add8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c+d))])
(Add16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c+d))])
(Add32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c+d))])
(Add64 (Const64 [c]) (Const64 [d])) -> (Const64 [c+d])
(Add32F (Const32F [c]) (Const32F [d])) ->
(Const32F [f2i(float64(i2f32(c) + i2f32(d)))]) // ensure we combine the operands with 32 bit precision
(Add64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) + i2f(d))])
(Add32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))])
(Add64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))])
(AddPtr <t> x (Const64 [c])) -> (OffPtr <t> x [c])
(AddPtr <t> x (Const32 [c])) -> (OffPtr <t> x [c])
@ -112,17 +111,15 @@
(Sub16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c-d))])
(Sub32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c-d))])
(Sub64 (Const64 [c]) (Const64 [d])) -> (Const64 [c-d])
(Sub32F (Const32F [c]) (Const32F [d])) ->
(Const32F [f2i(float64(i2f32(c) - i2f32(d)))])
(Sub64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) - i2f(d))])
(Sub32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) - auxTo32F(d))])
(Sub64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) - auxTo64F(d))])
(Mul8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c*d))])
(Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c*d))])
(Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c*d))])
(Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
(Mul32F (Const32F [c]) (Const32F [d])) ->
(Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) * i2f(d))])
(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
(And8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c&d))])
(And16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c&d))])
@ -147,8 +144,8 @@
(Div16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))])
(Div32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))])
(Div64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))])
(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [f2i(float64(i2f32(c) / i2f32(d)))])
(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) / i2f(d))])
(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
(Not (ConstBool [c])) -> (ConstBool [1-c])
@ -444,12 +441,18 @@
(Leq8U (Const8 [c]) (Const8 [d])) -> (ConstBool [b2i(uint8(c) <= uint8(d))])
// constant floating point comparisons
(Eq(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) == i2f(d))])
(Neq(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) != i2f(d))])
(Greater(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) > i2f(d))])
(Geq(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) >= i2f(d))])
(Less(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) < i2f(d))])
(Leq(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) <= i2f(d))])
(Eq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))])
(Eq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))])
(Neq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))])
(Neq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))])
(Greater32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) > auxTo32F(d))])
(Greater64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) > auxTo64F(d))])
(Geq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) >= auxTo32F(d))])
(Geq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) >= auxTo64F(d))])
(Less32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) < auxTo32F(d))])
(Less64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) < auxTo64F(d))])
(Leq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) <= auxTo32F(d))])
(Leq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) <= auxTo64F(d))])
// simplifications
(Or(64|32|16|8) x x) -> x
@ -572,9 +575,9 @@
// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [f2i(extend32Fto64F(math.Float32frombits(uint32(x))))])
(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x])
(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(truncate64Fto32F(i2f(x)))))])
(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))])
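// For illustration (a sketch): math.Float64frombits(0x3FF0000000000000) is
// compiled as a store of the integer constant followed by a float load;
// the first rule above folds that pair directly to the constant 1.0.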
// Move float Loads up to Zeros so they can be constant folded.
(Load <t1> op:(OffPtr [o1] p1)
@ -1326,19 +1329,16 @@
(Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x)) -> (Mul8 (Const8 <t> [int64(int8(c*d))]) x)
// floating point optimizations
(Add(32|64)F x (Const(32|64)F [0])) -> x
(Sub(32|64)F x (Const(32|64)F [0])) -> x
(Mul(32|64)F x (Const(32|64)F [auxFrom64F(1)])) -> x
(Mul32F x (Const32F [auxFrom32F(-1)])) -> (Neg32F x)
(Mul64F x (Const64F [auxFrom64F(-1)])) -> (Neg64F x)
(Mul32F x (Const32F [auxFrom32F(2)])) -> (Add32F x x)
(Mul64F x (Const64F [auxFrom64F(2)])) -> (Add64F x x)
(Mul(32|64)F x (Const(32|64)F [f2i(1)])) -> x
(Mul32F x (Const32F [f2i(-1)])) -> (Neg32F x)
(Mul64F x (Const64F [f2i(-1)])) -> (Neg64F x)
(Mul32F x (Const32F [f2i(2)])) -> (Add32F x x)
(Mul64F x (Const64F [f2i(2)])) -> (Add64F x x)
(Div32F x (Const32F <t> [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F <t> [auxFrom32F(1/auxTo32F(c))]))
(Div64F x (Const64F <t> [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F <t> [auxFrom64F(1/auxTo64F(c))]))
(Div32F x (Const32F <t> [c])) && reciprocalExact32(float32(i2f(c))) -> (Mul32F x (Const32F <t> [f2i(1/i2f(c))]))
(Div64F x (Const64F <t> [c])) && reciprocalExact64(i2f(c)) -> (Mul64F x (Const64F <t> [f2i(1/i2f(c))]))
(Sqrt (Const64F [c])) -> (Const64F [f2i(math.Sqrt(i2f(c)))])
(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
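// For illustration: x / 4.0 becomes x * 0.25 since 1/4 is exactly
// representable (reciprocalExact), while x / 3.0 stays a divide; likewise
// Sqrt of a constant folds, e.g. Sqrt(4.0) -> 2.0.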
// recognize runtime.newobject and don't Zero/Nilcheck it
(Zero (Load (OffPtr [c] (SP)) mem) mem)
@ -1363,12 +1363,12 @@
(NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _)
&& isSameSym(sym, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
&& warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
-> (Invalid)
(NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _)
&& isSameSym(sym, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
&& warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
-> (Invalid)
// Evaluate constant address comparisons.
@ -1545,8 +1545,8 @@
// Don't Move from memory if the values are likely to already be
// in registers.
(Move {t1} [n] dst p1
mem:(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [0] p3) d2 _)))
mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
(Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@ -1554,12 +1554,12 @@
&& registerizable(b, t3)
&& o2 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [0] dst) d2 mem))
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
(Move {t1} [n] dst p1
mem:(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [o3] p3) d2
(Store {t4} op4:(OffPtr [0] p4) d3 _))))
mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
(Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
(Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@ -1570,14 +1570,14 @@
&& o3 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
(Store {t4} (OffPtr <t4.(*types.Type)> [0] dst) d3 mem)))
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
(Move {t1} [n] dst p1
mem:(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [o3] p3) d2
(Store {t4} op4:(OffPtr [o4] p4) d3
(Store {t5} op5:(OffPtr [0] p5) d4 _)))))
mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
(Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
(Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
(Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@ -1591,16 +1591,16 @@
&& o3-o4 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
(Store {t4} (OffPtr <t4.(*types.Type)> [o4] dst) d3
(Store {t5} (OffPtr <t5.(*types.Type)> [0] dst) d4 mem))))
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Store {t4} (OffPtr <tt4> [o4] dst) d3
(Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
// Same thing but with VarDef in the middle.
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [0] p3) d2 _))))
(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
(Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@ -1608,13 +1608,13 @@
&& registerizable(b, t3)
&& o2 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [0] dst) d2 mem))
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [o3] p3) d2
(Store {t4} op4:(OffPtr [0] p4) d3 _)))))
(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
(Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
(Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@ -1625,15 +1625,15 @@
&& o3 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
(Store {t4} (OffPtr <t4.(*types.Type)> [0] dst) d3 mem)))
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [o3] p3) d2
(Store {t4} op4:(OffPtr [o4] p4) d3
(Store {t5} op5:(OffPtr [0] p5) d4 _))))))
(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
(Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
(Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
(Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@ -1647,10 +1647,10 @@
&& o3-o4 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
(Store {t4} (OffPtr <t4.(*types.Type)> [o4] dst) d3
(Store {t5} (OffPtr <t5.(*types.Type)> [0] dst) d4 mem))))
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Store {t4} (OffPtr <tt4> [o4] dst) d3
(Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
// Prefer Zero and Store to Move.
(Move {t1} [n] dst p1

View file

@ -470,8 +470,9 @@ var genericOps = []opData{
{name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None", zeroWidth: true}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
{name: "VarKill", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
{name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
{name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
// TODO: what's the difference between VarLive and KeepAlive?
{name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
{name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
// Ops for breaking 64-bit operations on 32-bit architectures
{name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo

View file

@ -63,9 +63,14 @@ type blockData struct {
}
type regInfo struct {
inputs []regMask
// inputs[i] encodes the set of registers allowed for the i'th input.
// Inputs that don't use registers (flags, memory, etc.) should be 0.
inputs []regMask
// clobbers encodes the set of registers that are overwritten by
// the instruction (other than the output registers).
clobbers regMask
outputs []regMask
// outputs[i] encodes the set of registers allowed for the i'th output.
outputs []regMask
}
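// For example (a sketch mirroring the *Ops.go files): a two-input,
// one-output integer op is described as
//	gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}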
type regMask uint64

View file

@ -484,7 +484,7 @@ func (x ByTopo) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x ByTopo) Less(i, j int) bool {
a := x[i]
b := x[j]
if a.Filename == a.Filename {
if a.Filename == b.Filename {
return a.StartLineno < b.StartLineno
}
return a.Filename < b.Filename

View file

@ -50,9 +50,17 @@ type outputInfo struct {
}
type regInfo struct {
inputs []inputInfo // ordered in register allocation order
// inputs encodes the register restrictions for an instruction's inputs.
// Each entry specifies an allowed register set for a particular input.
// They are listed in the order in which regalloc should pick a register
// from the register set (most constrained first).
// Inputs which do not need registers are not listed.
inputs []inputInfo
// clobbers encodes the set of registers that are overwritten by
// the instruction (other than the output registers).
clobbers regMask
outputs []outputInfo // ordered in register allocation order
// outputs is the same as inputs, but for the outputs of the instruction.
outputs []outputInfo
}
type auxType int8

View file

@ -550,6 +550,18 @@ const (
OpAMD64BTRQconst
OpAMD64BTSLconst
OpAMD64BTSQconst
OpAMD64BTCQmodify
OpAMD64BTCLmodify
OpAMD64BTSQmodify
OpAMD64BTSLmodify
OpAMD64BTRQmodify
OpAMD64BTRLmodify
OpAMD64BTCQconstmodify
OpAMD64BTCLconstmodify
OpAMD64BTSQconstmodify
OpAMD64BTSLconstmodify
OpAMD64BTRQconstmodify
OpAMD64BTRLconstmodify
OpAMD64TESTQ
OpAMD64TESTL
OpAMD64TESTW
@ -1107,6 +1119,7 @@ const (
OpARM64LoweredMuluhilo
OpARM64MVN
OpARM64NEG
OpARM64FABSD
OpARM64FNEGS
OpARM64FNEGD
OpARM64FSQRTD
@ -1139,6 +1152,8 @@ const (
OpARM64SRLconst
OpARM64SRA
OpARM64SRAconst
OpARM64ROR
OpARM64RORW
OpARM64RORconst
OpARM64RORWconst
OpARM64EXTRconst
@ -1157,6 +1172,12 @@ const (
OpARM64TSTWconst
OpARM64FCMPS
OpARM64FCMPD
OpARM64MVNshiftLL
OpARM64MVNshiftRL
OpARM64MVNshiftRA
OpARM64NEGshiftLL
OpARM64NEGshiftRL
OpARM64NEGshiftRA
OpARM64ADDshiftLL
OpARM64ADDshiftRL
OpARM64ADDshiftRA
@ -1184,6 +1205,12 @@ const (
OpARM64CMPshiftLL
OpARM64CMPshiftRL
OpARM64CMPshiftRA
OpARM64CMNshiftLL
OpARM64CMNshiftRL
OpARM64CMNshiftRA
OpARM64TSTshiftLL
OpARM64TSTshiftRL
OpARM64TSTshiftRA
OpARM64BFI
OpARM64BFXIL
OpARM64SBFIZ
@ -1247,6 +1274,8 @@ const (
OpARM64MOVDstorezeroidx8
OpARM64FMOVDgpfp
OpARM64FMOVDfpgp
OpARM64FMOVSgpfp
OpARM64FMOVSfpgp
OpARM64MOVBreg
OpARM64MOVBUreg
OpARM64MOVHreg
@ -1275,6 +1304,7 @@ const (
OpARM64FCVTDS
OpARM64FRINTAD
OpARM64FRINTMD
OpARM64FRINTND
OpARM64FRINTPD
OpARM64FRINTZD
OpARM64CSEL
@ -1551,6 +1581,7 @@ const (
OpPPC64MULHW
OpPPC64MULHDU
OpPPC64MULHWU
OpPPC64LoweredMuluhilo
OpPPC64FMUL
OpPPC64FMULS
OpPPC64FMADD
@ -1630,6 +1661,17 @@ const (
OpPPC64MOVDBRload
OpPPC64MOVWBRload
OpPPC64MOVHBRload
OpPPC64MOVBZloadidx
OpPPC64MOVHloadidx
OpPPC64MOVHZloadidx
OpPPC64MOVWloadidx
OpPPC64MOVWZloadidx
OpPPC64MOVDloadidx
OpPPC64MOVHBRloadidx
OpPPC64MOVWBRloadidx
OpPPC64MOVDBRloadidx
OpPPC64FMOVDloadidx
OpPPC64FMOVSloadidx
OpPPC64MOVDBRstore
OpPPC64MOVWBRstore
OpPPC64MOVHBRstore
@ -1641,6 +1683,15 @@ const (
OpPPC64MOVDstore
OpPPC64FMOVDstore
OpPPC64FMOVSstore
OpPPC64MOVBstoreidx
OpPPC64MOVHstoreidx
OpPPC64MOVWstoreidx
OpPPC64MOVDstoreidx
OpPPC64FMOVDstoreidx
OpPPC64FMOVSstoreidx
OpPPC64MOVHBRstoreidx
OpPPC64MOVWBRstoreidx
OpPPC64MOVDBRstoreidx
OpPPC64MOVBstorezero
OpPPC64MOVHstorezero
OpPPC64MOVWstorezero
@ -6895,6 +6946,180 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "BTCQmodify",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTCQ,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTCLmodify",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTCL,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTSQmodify",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTSQ,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTSLmodify",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTSL,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTRQmodify",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTRQ,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTRLmodify",
auxType: auxSymOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTRL,
reg: regInfo{
inputs: []inputInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTCQconstmodify",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTCQ,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTCLconstmodify",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTCL,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTSQconstmodify",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTSQ,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTSLconstmodify",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTSL,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTRQconstmodify",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTRQ,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "BTRLconstmodify",
auxType: auxSymValAndOff,
argLen: 2,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ABTRL,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
},
},
{
name: "TESTQ",
argLen: 2,
@ -14656,6 +14881,19 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "FABSD",
argLen: 1,
asm: arm64.AFABSD,
reg: regInfo{
inputs: []inputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
},
{
name: "FNEGS",
argLen: 1,
@ -15104,6 +15342,34 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "ROR",
argLen: 2,
asm: arm64.AROR,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "RORW",
argLen: 2,
asm: arm64.ARORW,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "RORconst",
auxType: auxInt64,
@ -15320,6 +15586,90 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MVNshiftLL",
auxType: auxInt64,
argLen: 1,
asm: arm64.AMVN,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "MVNshiftRL",
auxType: auxInt64,
argLen: 1,
asm: arm64.AMVN,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "MVNshiftRA",
auxType: auxInt64,
argLen: 1,
asm: arm64.AMVN,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "NEGshiftLL",
auxType: auxInt64,
argLen: 1,
asm: arm64.ANEG,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "NEGshiftRL",
auxType: auxInt64,
argLen: 1,
asm: arm64.ANEG,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "NEGshiftRA",
auxType: auxInt64,
argLen: 1,
asm: arm64.ANEG,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "ADDshiftLL",
auxType: auxInt64,
@ -15716,6 +16066,78 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "CMNshiftLL",
auxType: auxInt64,
argLen: 2,
asm: arm64.ACMN,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CMNshiftRL",
auxType: auxInt64,
argLen: 2,
asm: arm64.ACMN,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "CMNshiftRA",
auxType: auxInt64,
argLen: 2,
asm: arm64.ACMN,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "TSTshiftLL",
auxType: auxInt64,
argLen: 2,
asm: arm64.ATST,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "TSTshiftRL",
auxType: auxInt64,
argLen: 2,
asm: arm64.ATST,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "TSTshiftRA",
auxType: auxInt64,
argLen: 2,
asm: arm64.ATST,
reg: regInfo{
inputs: []inputInfo{
{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
},
},
},
{
name: "BFI",
auxType: auxInt64,
@ -16571,6 +16993,32 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "FMOVSgpfp",
argLen: 1,
asm: arm64.AFMOVS,
reg: regInfo{
inputs: []inputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
},
{
name: "FMOVSfpgp",
argLen: 1,
asm: arm64.AFMOVS,
reg: regInfo{
inputs: []inputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
},
},
},
{
name: "MOVBreg",
argLen: 1,
@ -16935,6 +17383,19 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "FRINTND",
argLen: 1,
asm: arm64.AFRINTND,
reg: regInfo{
inputs: []inputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
},
{
name: "FRINTPD",
argLen: 1,
@ -20589,6 +21050,21 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "LoweredMuluhilo",
argLen: 2,
resultNotInArgs: true,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "FMUL",
argLen: 2,
@ -21690,6 +22166,193 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVBZloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVBZ,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVHloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVHZloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVHZ,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVWloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVWZloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVWZ,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVDloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVD,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVHBRloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVHBR,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVWBRloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVWBR,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVDBRloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AMOVDBR,
reg: regInfo{
inputs: []inputInfo{
{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "FMOVDloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AFMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
},
},
},
{
name: "FMOVSloadidx",
auxType: auxSymOff,
argLen: 3,
faultOnNilArg0: true,
symEffect: SymRead,
asm: ppc64.AFMOVS,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
},
},
},
{
name: "MOVDBRstore",
auxType: auxSymOff,
@ -21848,6 +22511,141 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVBstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVHstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVWstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVDstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AMOVD,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "FMOVDstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AFMOVD,
reg: regInfo{
inputs: []inputInfo{
{2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "FMOVSstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AFMOVS,
reg: regInfo{
inputs: []inputInfo{
{2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVHBRstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AMOVHBR,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVWBRstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AMOVWBR,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVDBRstoreidx",
auxType: auxSymOff,
argLen: 4,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: ppc64.AMOVDBR,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
{2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{
name: "MOVBstorezero",
auxType: auxSymOff,

View file

@ -58,7 +58,7 @@ func (r relation) String() string {
}
// domain represents the domain of a variable pair in which a set
// of relations is known. For example, relations learned for unsigned
// pairs cannot be transferred to signed pairs because the same bit
// representation can mean something else.
type domain uint
@ -625,7 +625,7 @@ var (
// For example:
// OpLess8: {signed, lt},
// v1 = (OpLess8 v2 v3).
// If v1 branch is taken than we learn that the rangeMaks
// If v1 branch is taken then we learn that the rangeMask
// can be at most lt.
domainRelationTable = map[Op]struct {
d domain

View file

@ -150,6 +150,8 @@ type register uint8
const noRegister register = 255
// A regMask encodes a set of machine registers.
// TODO: regMask -> regSet?
type regMask uint64
func (m regMask) String() string {

View file

@ -234,7 +234,6 @@ func canMergeLoad(target, load, x *Value) bool {
// memPreds contains memory states known to be predecessors of load's
// memory state. It is lazily initialized.
var memPreds map[*Value]bool
search:
for i := 0; len(args) > 0; i++ {
const limit = 100
if i >= limit {
@ -246,13 +245,13 @@ search:
if target.Block.ID != v.Block.ID {
// Since target and load are in the same block
// we can stop searching when we leave the block.
continue search
continue
}
if v.Op == OpPhi {
// A Phi implies we have reached the top of the block.
// The memory phi, if it exists, is always
// the first logical store in the block.
continue search
continue
}
if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
// We could handle this situation however it is likely
@ -296,14 +295,14 @@ search:
// load = read ... mem
// target = add x load
if memPreds[v] {
continue search
continue
}
return false
}
if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
// If v takes mem as an input then we know mem
// is valid at this point.
continue search
continue
}
for _, a := range v.Args {
if target.Block.ID == a.Block.ID {
@ -450,21 +449,26 @@ func extend32Fto64F(f float32) float64 {
return math.Float64frombits(r)
}
// i2f is used in rules for converting from an AuxInt to a float.
func i2f(i int64) float64 {
return math.Float64frombits(uint64(i))
}
// i2f32 is used in rules for converting from an AuxInt to a float32.
func i2f32(i int64) float32 {
return float32(math.Float64frombits(uint64(i)))
}
// f2i is used in the rules for storing a float in AuxInt.
func f2i(f float64) int64 {
// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
func auxFrom64F(f float64) int64 {
return int64(math.Float64bits(f))
}
// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
func auxFrom32F(f float32) int64 {
return int64(math.Float64bits(extend32Fto64F(f)))
}
// auxTo32F decodes a float32 from the AuxInt value provided.
func auxTo32F(i int64) float32 {
return truncate64Fto32F(math.Float64frombits(uint64(i)))
}
// auxTo64F decodes a float64 from the AuxInt value provided.
func auxTo64F(i int64) float64 {
return math.Float64frombits(uint64(i))
}
// uaddOvf returns true if unsigned a+b would overflow.
func uaddOvf(a, b int64) bool {
return uint64(a)+uint64(b) < uint64(a)
@ -646,11 +650,11 @@ func noteRule(s string) bool {
return true
}
// warnRule generates a compiler debug output with string s when
// cond is true and the rule is fired.
// warnRule generates compiler debug output with string s when
// v is not in autogenerated code, cond is true and the rule has fired.
func warnRule(cond bool, v *Value, s string) bool {
if cond {
v.Block.Func.Warnl(v.Pos, s)
if pos := v.Pos; pos.Line() > 1 && cond {
v.Block.Func.Warnl(pos, s)
}
return true
}
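
A note on the renaming above: the old f2i/i2f names gave no hint that float32 values are stored widened to float64 bits in the AuxInt, while the new auxFrom*/auxTo* names make the encoding direction explicit. A minimal sketch of the round trip follows; it is simplified, since the real helpers go through extend32Fto64F/truncate64Fto32F (shown earlier in this file) to preserve NaN bit patterns exactly:

    package main

    import (
    	"fmt"
    	"math"
    )

    // Simplified versions of the helpers above: a float32 is widened to
    // float64 and its bit pattern is stored in the int64 AuxInt field.
    func auxFrom32F(f float32) int64 { return int64(math.Float64bits(float64(f))) }
    func auxTo32F(i int64) float32   { return float32(math.Float64frombits(uint64(i))) }

    func main() {
    	const f float32 = 0.1
    	aux := auxFrom32F(f)
    	fmt.Println(auxTo32F(aux) == f) // true: every float32 widens to float64 exactly
    }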

View file

@ -2507,38 +2507,44 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool {
v.reset(Op386FlagLT_ULT)
return true
}
// match: (CMPBconst (ANDL x y) [0])
// cond:
// match: (CMPBconst l:(ANDL x y) [0])
// cond: l.Uses==1
// result: (TESTB x y)
for {
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
l := v.Args[0]
if l.Op != Op386ANDL {
break
}
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
if !(l.Uses == 1) {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(Op386TESTB)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (CMPBconst (ANDLconst [c] x) [0])
// cond:
// match: (CMPBconst l:(ANDLconst [c] x) [0])
// cond: l.Uses==1
// result: (TESTBconst [int64(int8(c))] x)
for {
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
l := v.Args[0]
if l.Op != Op386ANDLconst {
break
}
c := l.AuxInt
x := l.Args[0]
if !(l.Uses == 1) {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(Op386TESTBconst)
v.AuxInt = int64(int8(c))
v.AddArg(x)
@ -2819,38 +2825,44 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool {
v.reset(Op386FlagLT_ULT)
return true
}
// match: (CMPLconst (ANDL x y) [0])
// cond:
// match: (CMPLconst l:(ANDL x y) [0])
// cond: l.Uses==1
// result: (TESTL x y)
for {
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
l := v.Args[0]
if l.Op != Op386ANDL {
break
}
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
if !(l.Uses == 1) {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(Op386TESTL)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (CMPLconst (ANDLconst [c] x) [0])
// cond:
// match: (CMPLconst l:(ANDLconst [c] x) [0])
// cond: l.Uses==1
// result: (TESTLconst [c] x)
for {
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
l := v.Args[0]
if l.Op != Op386ANDLconst {
break
}
c := l.AuxInt
x := l.Args[0]
if !(l.Uses == 1) {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(Op386TESTLconst)
v.AuxInt = c
v.AddArg(x)
@ -3122,38 +3134,44 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool {
v.reset(Op386FlagLT_ULT)
return true
}
// match: (CMPWconst (ANDL x y) [0])
// cond:
// match: (CMPWconst l:(ANDL x y) [0])
// cond: l.Uses==1
// result: (TESTW x y)
for {
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
l := v.Args[0]
if l.Op != Op386ANDL {
break
}
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
if !(l.Uses == 1) {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(Op386TESTW)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (CMPWconst (ANDLconst [c] x) [0])
// cond:
// match: (CMPWconst l:(ANDLconst [c] x) [0])
// cond: l.Uses==1
// result: (TESTWconst [int64(int16(c))] x)
for {
if v.AuxInt != 0 {
break
}
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
l := v.Args[0]
if l.Op != Op386ANDLconst {
break
}
c := l.AuxInt
x := l.Args[0]
if !(l.Uses == 1) {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(Op386TESTWconst)
v.AuxInt = int64(int16(c))
v.AddArg(x)
@ -19804,7 +19822,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool {
_ = typ
// match: (Neg32F x)
// cond: !config.use387
// result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
// result: (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
for {
x := v.Args[0]
if !(!config.use387) {
@ -19813,7 +19831,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool {
v.reset(Op386PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32)
v0.AuxInt = f2i(math.Copysign(0, -1))
v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
v.AddArg(v0)
return true
}
@ -19840,7 +19858,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool {
_ = typ
// match: (Neg64F x)
// cond: !config.use387
// result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
// result: (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
for {
x := v.Args[0]
if !(!config.use387) {
@ -19849,7 +19867,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool {
v.reset(Op386PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64)
v0.AuxInt = f2i(math.Copysign(0, -1))
v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
v.AddArg(v0)
return true
}
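
The new l.Uses==1 condition on the TEST rewrites above is the substantive change: folding the mask-and-compare into a single TEST is only a win when the AND result feeds nothing but the comparison, since otherwise the ANDL must be kept anyway and the TEST would duplicate work. A hypothetical Go-level pattern of the kind these rules target:

    // The x&y value has a single use (the comparison with zero), so the
    // mask and the compare can become one TESTB instruction.
    func anyCommonBits(x, y byte) bool {
    	return x&y != 0
    }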

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -5071,7 +5071,7 @@ func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool {
_ = typ
// match: (F64Add (F64Const [x]) (F64Const [y]))
// cond:
// result: (F64Const [f2i(i2f(x) + i2f(y))])
// result: (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@ -5085,7 +5085,7 @@ func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool {
}
y := v_1.AuxInt
v.reset(OpWasmF64Const)
v.AuxInt = f2i(i2f(x) + i2f(y))
v.AuxInt = auxFrom64F(auxTo64F(x) + auxTo64F(y))
return true
}
// match: (F64Add (F64Const [x]) y)
@ -5115,7 +5115,7 @@ func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool {
_ = typ
// match: (F64Mul (F64Const [x]) (F64Const [y]))
// cond:
// result: (F64Const [f2i(i2f(x) * i2f(y))])
// result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@ -5129,7 +5129,7 @@ func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool {
}
y := v_1.AuxInt
v.reset(OpWasmF64Const)
v.AuxInt = f2i(i2f(x) * i2f(y))
v.AuxInt = auxFrom64F(auxTo64F(x) * auxTo64F(y))
return true
}
// match: (F64Mul (F64Const [x]) y)

File diff suppressed because it is too large

View file

@ -25,7 +25,7 @@ func softfloat(f *Func) {
case OpConst32F:
v.Op = OpConst32
v.Type = f.Config.Types.UInt32
v.AuxInt = int64(int32(math.Float32bits(i2f32(v.AuxInt))))
v.AuxInt = int64(int32(math.Float32bits(auxTo32F(v.AuxInt))))
case OpConst64F:
v.Op = OpConst64
v.Type = f.Config.Types.UInt64

View file

@ -62,6 +62,9 @@ func TestStmtLines(t *testing.T) {
if pkgname == "runtime" {
continue
}
if e.Val(dwarf.AttrStmtList) == nil {
continue
}
lrdr, err := dw.LineReader(e)
must(err)

View file

@ -19,7 +19,7 @@ dy = <Optimized out, as expected>
65: if len(os.Args) > 1 {
73: scanner := bufio.NewScanner(reader)
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -29,7 +29,7 @@ i = 1
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -39,7 +39,7 @@ i = 1
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -49,7 +49,7 @@ i = 1
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -59,7 +59,7 @@ i = 2
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -69,7 +69,7 @@ i = 2
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -79,7 +79,7 @@ i = 2
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -89,7 +89,7 @@ i = 4
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -99,7 +99,7 @@ i = 4
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@ -109,7 +109,7 @@ i = 5
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
scanner = (struct bufio.Scanner *) <A>
scanner = (bufio.Scanner *) <A>
86: for i, a := range hist {
87: if a == 0 { //gdb-opt=(a,n,t)
a = 0
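
The repeated expected-output change in this file is a single substantive one: gdb now prints the scanner as bufio.Scanner * rather than struct bufio.Scanner *, presumably reflecting that the compiler no longer prefixes Go struct type names with the C-style struct keyword in the debug info.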

View file

@ -817,7 +817,7 @@ func (t *Type) ChanArgs() *Type {
return t.Extra.(ChanArgs).T
}
// FuncArgs returns the channel type for TFUNCARGS type t.
// FuncArgs returns the func type for TFUNCARGS type t.
func (t *Type) FuncArgs() *Type {
t.wantEtype(TFUNCARGS)
return t.Extra.(FuncArgs).T

View file

@ -547,22 +547,22 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.Op386ADDLconstmodify:
var p *obj.Prog = nil
sc := v.AuxValAndOff()
off := sc.Off()
val := sc.Val()
if val == 1 {
p = s.Prog(x86.AINCL)
} else if val == -1 {
p = s.Prog(x86.ADECL)
} else {
p = s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = val
if val == 1 || val == -1 {
var p *obj.Prog
if val == 1 {
p = s.Prog(x86.AINCL)
} else {
p = s.Prog(x86.ADECL)
}
off := sc.Off()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
break
}
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
fallthrough
case ssa.Op386ANDLconstmodify, ssa.Op386ORLconstmodify, ssa.Op386XORLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
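
The restructured Op386ADDLconstmodify case above uses a break-or-fallthrough shape: add-by-±1 becomes INCL/DECL and breaks out of the switch, while every other constant falls through to share the generic constant-modify emission with the AND/OR/XOR cases. A standalone sketch of that control flow (emit and the operand strings are illustrative, not the compiler's API):

    package main

    import "fmt"

    func emit(op string, val, off int64) { fmt.Printf("%sL $%d, %d(reg)\n", op, val, off) }

    func constModify(op string, val, off int64) {
    	switch op {
    	case "ADD":
    		if val == 1 || val == -1 {
    			// INCL/DECL take no constant operand; finish here so the
    			// shared path below is skipped.
    			fmt.Printf("INCL/DECL %d(reg)\n", off)
    			break
    		}
    		fallthrough // reuse the generic constant-modify emission
    	case "AND", "OR", "XOR":
    		emit(op, val, off)
    	}
    }

    func main() {
    	constModify("ADD", 1, 8) // INCL/DECL 8(reg)
    	constModify("ADD", 5, 8) // ADDL $5, 8(reg)
    	constModify("XOR", 3, 8) // XORL $3, 8(reg)
    }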

View file

@ -69,6 +69,7 @@ var okgoarch = []string{
"ppc64le",
"riscv64",
"s390x",
"sparc64",
"wasm",
}
@ -86,6 +87,7 @@ var okgoos = []string{
"openbsd",
"plan9",
"windows",
"aix",
}
// find reports the first index of p in l[0:n], or else -1.
@ -1387,6 +1389,7 @@ func checkNotStale(goBinary string, targets ...string) {
// single point of truth for supported platforms. This list is used
// by 'go tool dist list'.
var cgoEnabled = map[string]bool{
"aix/ppc64": false,
"darwin/386": true,
"darwin/amd64": true,
"darwin/arm": true,
@ -1407,6 +1410,7 @@ var cgoEnabled = map[string]bool{
"linux/mips64le": true,
"linux/riscv64": true,
"linux/s390x": true,
"linux/sparc64": true,
"android/386": true,
"android/amd64": true,
"android/arm": true,

View file

@ -87,6 +87,10 @@ func mkzbootstrap(file string) {
// stack guard size. Larger multipliers are used for non-optimized
// builds that have larger stack frames.
func stackGuardMultiplier() int {
// On AIX, a larger stack is needed for syscalls
if goos == "aix" {
return 2
}
for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
if s == "-N" {
return 2

View file

@ -81,6 +81,9 @@ func main() {
}
case "windows":
exe = ".exe"
case "aix":
// uname -m doesn't work under AIX
gohostarch = "ppc64"
}
sysinit()

View file

@ -1469,8 +1469,10 @@ func (t *tester) packageHasBenchmarks(pkg string) bool {
// because cmd/dist has to be buildable by Go 1.4.
func raceDetectorSupported(goos, goarch string) bool {
switch goos {
case "linux", "darwin", "freebsd", "netbsd", "windows":
case "linux":
return goarch == "amd64" || goarch == "ppc64le"
case "darwin", "freebsd", "netbsd", "windows":
return goarch == "amd64"
default:
return false
}

View file

@ -52,7 +52,7 @@ func usage() {
fmt.Fprintf(os.Stderr, "\n%s\n", f.name)
}
desc := strings.TrimSpace(f.desc)
desc = strings.Replace(desc, "\n", "\n\t", -1)
desc = strings.ReplaceAll(desc, "\n", "\n\t")
fmt.Fprintf(os.Stderr, "\t%s\n", desc)
}
os.Exit(2)
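
Many hunks in this commit are the same mechanical migration: strings.ReplaceAll (and its bytes counterpart, used later in this commit) is defined as Replace with n = -1, so the two spellings are equivalent; ReplaceAll simply states the intent directly. A quick check:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	s := "a/b/c"
    	fmt.Println(strings.Replace(s, "/", "-", -1)) // a-b-c
    	fmt.Println(strings.ReplaceAll(s, "/", "-"))  // a-b-c: same result by definition
    }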

View file

@ -193,12 +193,12 @@ func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, ass
var params, results []string
for _, p := range fn.Type.Params.List {
t := gofmt(p.Type)
t = strings.Replace(t, "_Ctype_", "C.", -1)
t = strings.ReplaceAll(t, "_Ctype_", "C.")
params = append(params, t)
}
for _, r := range fn.Type.Results.List {
t := gofmt(r.Type)
t = strings.Replace(t, "_Ctype_", "C.", -1)
t = strings.ReplaceAll(t, "_Ctype_", "C.")
results = append(results, t)
}
cfg.External["C."+fn.Name.Name[7:]] = joinFunc(params, results)

View file

@ -144,7 +144,7 @@
// link against shared libraries previously created with
// -buildmode=shared.
// -mod mode
// module download mode to use: readonly, release, or vendor.
// module download mode to use: readonly or vendor.
// See 'go help modules' for more.
// -pkgdir dir
// install and load all packages from dir instead of the usual locations.
@ -1449,6 +1449,12 @@
// The directory where the go command will write
// temporary source files, packages, and binaries.
//
// Each entry in the GOFLAGS list must be a standalone flag.
// Because the entries are space-separated, flag values must
// not contain spaces. In some cases, you can provide multiple flag
// values instead: for example, to set '-ldflags=-s -w'
// you can use 'GOFLAGS=-ldflags=-s -ldflags=-w'.
//
// Environment variables for use with cgo:
//
// CC

View file

@ -1074,6 +1074,8 @@ func testMove(t *testing.T, vcs, url, base, config string) {
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.must(os.Mkdir(tg.path(".hg"), 0700))
tg.must(ioutil.WriteFile(filepath.Join(tg.path(".hg"), "hgrc"), nil, 0600))
tg.setenv("GOPATH", tg.path("."))
tg.run("get", "-d", url)
tg.run("get", "-d", "-u", url)
@ -1088,7 +1090,7 @@ func testMove(t *testing.T, vcs, url, base, config string) {
path := tg.path(filepath.Join("src", config))
data, err := ioutil.ReadFile(path)
tg.must(err)
data = bytes.Replace(data, []byte(base), []byte(base+"XXX"), -1)
data = bytes.ReplaceAll(data, []byte(base), []byte(base+"XXX"))
tg.must(ioutil.WriteFile(path, data, 0644))
}
if vcs == "git" {
@ -1185,6 +1187,7 @@ func TestImportCycle(t *testing.T) {
}
func TestListImportMap(t *testing.T) {
skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@ -1418,6 +1421,7 @@ func TestRelativeGOBINFail(t *testing.T) {
defer tg.cleanup()
tg.tempFile("triv.go", `package main; func main() {}`)
tg.setenv("GOBIN", ".")
tg.cd(tg.path("."))
tg.runFail("install")
tg.grepStderr("cannot install, GOBIN must be an absolute path", "go install must fail if $GOBIN is a relative path")
}
@ -1729,20 +1733,23 @@ func TestGoListDeps(t *testing.T) {
tg.run("list", "-deps", "p1")
tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4")
// Check the list is in dependency order.
tg.run("list", "-deps", "math")
want := "internal/cpu\nunsafe\nmath\n"
out := tg.stdout.String()
if !strings.Contains(out, "internal/cpu") {
// Some systems don't use internal/cpu.
want = "unsafe\nmath\n"
}
if tg.stdout.String() != want {
t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want)
if runtime.Compiler != "gccgo" {
// Check the list is in dependency order.
tg.run("list", "-deps", "math")
want := "internal/cpu\nunsafe\nmath\n"
out := tg.stdout.String()
if !strings.Contains(out, "internal/cpu") {
// Some systems don't use internal/cpu.
want = "unsafe\nmath\n"
}
if tg.stdout.String() != want {
t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want)
}
}
}
func TestGoListTest(t *testing.T) {
skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@ -1815,6 +1822,7 @@ func TestGoListCompiledCgo(t *testing.T) {
}
func TestGoListExport(t *testing.T) {
skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@ -2051,6 +2059,7 @@ func TestGoTestCpuprofileLeavesBinaryBehind(t *testing.T) {
}
func TestGoTestCpuprofileDashOControlsBinaryLocation(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@ -2107,6 +2116,7 @@ func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
}
func TestGoTestDashOWritesBinary(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@ -2350,14 +2360,14 @@ func TestShadowingLogic(t *testing.T) {
// The math in root1 is not "math" because the standard math is.
tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root1/src/math")
pwdForwardSlash := strings.Replace(pwd, string(os.PathSeparator), "/", -1)
pwdForwardSlash := strings.ReplaceAll(pwd, string(os.PathSeparator), "/")
if !strings.HasPrefix(pwdForwardSlash, "/") {
pwdForwardSlash = "/" + pwdForwardSlash
}
// The output will have makeImportValid applied, but we only
// bother to deal with characters we might reasonably see.
for _, r := range " :" {
pwdForwardSlash = strings.Replace(pwdForwardSlash, string(r), "_", -1)
pwdForwardSlash = strings.ReplaceAll(pwdForwardSlash, string(r), "_")
}
want := "(_" + pwdForwardSlash + "/testdata/shadow/root1/src/math) (" + filepath.Join(runtime.GOROOT(), "src", "math") + ")"
if strings.TrimSpace(tg.getStdout()) != want {
@ -2402,6 +2412,7 @@ func checkCoverage(tg *testgoData, data string) {
}
func TestCoverageRuns(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@ -2413,6 +2424,7 @@ func TestCoverageRuns(t *testing.T) {
}
func TestCoverageDotImport(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@ -2425,6 +2437,7 @@ func TestCoverageDotImport(t *testing.T) {
// Check that coverage analysis uses set mode.
// Also check that coverage profiles merge correctly.
func TestCoverageUsesSetMode(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@ -2455,6 +2468,7 @@ func TestCoverageUsesAtomicModeForRace(t *testing.T) {
if !canRace {
t.Skip("skipping because race detector not supported")
}
skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
@ -2472,6 +2486,7 @@ func TestCoverageUsesAtomicModeForRace(t *testing.T) {
}
func TestCoverageSyncAtomicImport(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@ -2493,6 +2508,7 @@ func TestCoverageDepLoop(t *testing.T) {
}
func TestCoverageImportMainLoop(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
@ -2503,6 +2519,7 @@ func TestCoverageImportMainLoop(t *testing.T) {
}
func TestCoveragePattern(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@ -2518,6 +2535,7 @@ func TestCoveragePattern(t *testing.T) {
}
func TestCoverageErrorLine(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@ -2539,7 +2557,7 @@ func TestCoverageErrorLine(t *testing.T) {
// It's OK that stderr2 drops the character position in the error,
// because of the //line directive (see golang.org/issue/22662).
stderr = strings.Replace(stderr, "p.go:4:2:", "p.go:4:", -1)
stderr = strings.ReplaceAll(stderr, "p.go:4:2:", "p.go:4:")
if stderr != stderr2 {
t.Logf("test -cover changed error messages:\nbefore:\n%s\n\nafter:\n%s", stderr, stderr2)
t.Skip("golang.org/issue/22660")
@ -2561,6 +2579,7 @@ func TestTestBuildFailureOutput(t *testing.T) {
}
func TestCoverageFunc(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@ -2576,6 +2595,7 @@ func TestCoverageFunc(t *testing.T) {
// Issue 24588.
func TestCoverageDashC(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@ -2684,6 +2704,7 @@ func main() {
}
func TestCoverageWithCgo(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
@ -5164,6 +5185,7 @@ func TestCacheCoverage(t *testing.T) {
}
func TestCacheVet(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@ -6082,6 +6104,7 @@ func TestNoRelativeTmpdir(t *testing.T) {
// Issue 24704.
func TestLinkerTmpDirIsDeleted(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/link")
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
@ -6129,6 +6152,7 @@ func TestLinkerTmpDirIsDeleted(t *testing.T) {
}
func testCDAndGOPATHAreDifferent(tg *testgoData, cd, gopath string) {
skipIfGccgo(tg.t, "gccgo does not support -ldflags -X")
tg.setenv("GOPATH", gopath)
tg.tempDir("dir")
@ -6155,7 +6179,7 @@ func TestCDAndGOPATHAreDifferent(t *testing.T) {
testCDAndGOPATHAreDifferent(tg, cd, gopath)
if runtime.GOOS == "windows" {
testCDAndGOPATHAreDifferent(tg, cd, strings.Replace(gopath, `\`, `/`, -1))
testCDAndGOPATHAreDifferent(tg, cd, strings.ReplaceAll(gopath, `\`, `/`))
testCDAndGOPATHAreDifferent(tg, cd, strings.ToUpper(gopath))
testCDAndGOPATHAreDifferent(tg, cd, strings.ToLower(gopath))
}
@ -6184,6 +6208,7 @@ func TestGoBuildDashODevNull(t *testing.T) {
// Issue 25093.
func TestCoverpkgTestOnly(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()

View file

@ -112,9 +112,10 @@ func runClean(cmd *base.Command, args []string) {
}
}
var b work.Builder
b.Print = fmt.Print
if cleanCache {
var b work.Builder
b.Print = fmt.Print
dir := cache.DefaultDir()
if dir != "off" {
// Remove the cache subdirectories but not the top cache directory.
@ -156,8 +157,13 @@ func runClean(cmd *base.Command, args []string) {
if modfetch.PkgMod == "" {
base.Fatalf("go clean -modcache: no module cache")
}
if err := removeAll(modfetch.PkgMod); err != nil {
base.Errorf("go clean -modcache: %v", err)
if cfg.BuildN || cfg.BuildX {
b.Showcmd("", "rm -rf %s", modfetch.PkgMod)
}
if !cfg.BuildN {
if err := removeAll(modfetch.PkgMod); err != nil {
base.Errorf("go clean -modcache: %v", err)
}
}
}
}

View file

@ -203,7 +203,7 @@ func runEnv(cmd *base.Command, args []string) {
fmt.Printf("%s=\"%s\"\n", e.Name, e.Value)
case "plan9":
if strings.IndexByte(e.Value, '\x00') < 0 {
fmt.Printf("%s='%s'\n", e.Name, strings.Replace(e.Value, "'", "''", -1))
fmt.Printf("%s='%s'\n", e.Name, strings.ReplaceAll(e.Value, "'", "''"))
} else {
v := strings.Split(e.Value, "\x00")
fmt.Printf("%s=(", e.Name)

View file

@ -964,7 +964,7 @@ func matchGoImport(imports []metaImport, importPath string) (metaImport, error)
// expand rewrites s to replace {k} with match[k] for each key k in match.
func expand(match map[string]string, s string) string {
for k, v := range match {
s = strings.Replace(s, "{"+k+"}", v, -1)
s = strings.ReplaceAll(s, "{"+k+"}", v)
}
return s
}

View file

@ -507,6 +507,12 @@ General-purpose environment variables:
The directory where the go command will write
temporary source files, packages, and binaries.
Each entry in the GOFLAGS list must be a standalone flag.
Because the entries are space-separated, flag values must
not contain spaces. In some cases, you can provide multiple flag
values instead: for example, to set '-ldflags=-s -w'
you can use 'GOFLAGS=-ldflags=-s -ldflags=-w'.
Environment variables for use with cgo:
CC
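
The GOFLAGS paragraph above is worth a concrete illustration: entries are split on spaces, so a flag value containing a space cannot be expressed directly, and the documented workaround is to repeat the flag. A hypothetical driver (not part of this change):

    package main

    import (
    	"fmt"
    	"os"
    	"os/exec"
    )

    func main() {
    	// Two entries, "-ldflags=-s" and "-ldflags=-w"; writing
    	// "-ldflags=-s -w" instead would make "-w" its own entry.
    	os.Setenv("GOFLAGS", "-ldflags=-s -ldflags=-w")
    	out, err := exec.Command("go", "build", "./...").CombinedOutput()
    	fmt.Println(string(out), err)
    }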

View file

@ -146,7 +146,7 @@ func TestConvertLegacyConfig(t *testing.T) {
}
for _, tt := range tests {
t.Run(strings.Replace(tt.path, "/", "_", -1)+"_"+tt.vers, func(t *testing.T) {
t.Run(strings.ReplaceAll(tt.path, "/", "_")+"_"+tt.vers, func(t *testing.T) {
f, err := modfile.Parse("golden", []byte(tt.gomod), nil)
if err != nil {
t.Fatal(err)

View file

@ -185,7 +185,7 @@ func (e *RunError) Error() string {
text := e.Cmd + ": " + e.Err.Error()
stderr := bytes.TrimRight(e.Stderr, "\n")
if len(stderr) > 0 {
text += ":\n\t" + strings.Replace(string(stderr), "\n", "\n\t", -1)
text += ":\n\t" + strings.ReplaceAll(string(stderr), "\n", "\n\t")
}
return text
}

View file

@ -423,7 +423,7 @@ func TestCodeRepo(t *testing.T) {
}
}
}
t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.rev, f)
t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f)
if strings.HasPrefix(tt.path, vgotest1git) {
for _, alt := range altVgotests {
// Note: Communicating with f through tt; should be cleaned up.
@ -442,7 +442,7 @@ func TestCodeRepo(t *testing.T) {
tt.rev = remap(tt.rev, m)
tt.gomoderr = remap(tt.gomoderr, m)
tt.ziperr = remap(tt.ziperr, m)
t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.rev, f)
t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f)
tt = old
}
}
@ -473,9 +473,9 @@ func remap(name string, m map[string]string) string {
}
}
for k, v := range m {
name = strings.Replace(name, k, v, -1)
name = strings.ReplaceAll(name, k, v)
if codehost.AllHex(k) {
name = strings.Replace(name, k[:12], v[:12], -1)
name = strings.ReplaceAll(name, k[:12], v[:12])
}
}
return name
@ -505,11 +505,11 @@ var codeRepoVersionsTests = []struct {
},
{
path: "gopkg.in/russross/blackfriday.v2",
versions: []string{"v2.0.0"},
versions: []string{"v2.0.0", "v2.0.1"},
},
{
path: "gopkg.in/natefinch/lumberjack.v2",
versions: nil,
versions: []string{"v2.0.0"},
},
}
@ -522,7 +522,7 @@ func TestCodeRepoVersions(t *testing.T) {
}
defer os.RemoveAll(tmpdir)
for _, tt := range codeRepoVersionsTests {
t.Run(strings.Replace(tt.path, "/", "_", -1), func(t *testing.T) {
t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {
repo, err := Lookup(tt.path)
if err != nil {
t.Fatalf("Lookup(%q): %v", tt.path, err)
@ -570,7 +570,7 @@ func TestLatest(t *testing.T) {
}
defer os.RemoveAll(tmpdir)
for _, tt := range latestTests {
name := strings.Replace(tt.path, "/", "_", -1)
name := strings.ReplaceAll(tt.path, "/", "_")
t.Run(name, func(t *testing.T) {
repo, err := Lookup(tt.path)
if err != nil {

View file

@ -123,7 +123,7 @@ func downloadZip(mod module.Version, target string) error {
for _, f := range z.File {
if !strings.HasPrefix(f.Name, prefix) {
z.Close()
return fmt.Errorf("zip for %s has unexpected file %s", prefix[:len(prefix)-1], f.Name)
return fmt.Errorf("zip for %s has unexpected file %s", prefix, f.Name)
}
}
z.Close()

View file

@ -248,5 +248,5 @@ func (p *proxyRepo) Zip(version string, tmpdir string) (tmpfile string, err erro
// That is, it escapes things like ? and # (which really shouldn't appear anyway).
// It does not escape / to %2F: our REST API is designed so that / can be left as is.
func pathEscape(s string) string {
return strings.Replace(url.PathEscape(s), "%2F", "/", -1)
return strings.ReplaceAll(url.PathEscape(s), "%2F", "/")
}

View file

@ -14,6 +14,7 @@ import (
"cmd/go/internal/search"
"encoding/hex"
"fmt"
"internal/goroot"
"os"
"path/filepath"
"strings"
@ -30,13 +31,11 @@ func isStandardImportPath(path string) bool {
func findStandardImportPath(path string) string {
if search.IsStandardImportPath(path) {
dir := filepath.Join(cfg.GOROOT, "src", path)
if _, err := os.Stat(dir); err == nil {
return dir
if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
return filepath.Join(cfg.GOROOT, "src", path)
}
dir = filepath.Join(cfg.GOROOT, "src/vendor", path)
if _, err := os.Stat(dir); err == nil {
return dir
if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, "vendor/"+path) {
return filepath.Join(cfg.GOROOT, "src/vendor", path)
}
}
return ""
@ -232,11 +231,16 @@ func findModule(target, path string) module.Version {
}
func ModInfoProg(info string) []byte {
// Inject a variable with the debug information as runtime/debug.modinfo,
// but compile it in package main so that it is specific to the binary.
// Populate it in an init func so that it will work with go:linkname,
// but use a string constant instead of the name 'string' in case
// package main shadows the built-in 'string' with some local declaration.
return []byte(fmt.Sprintf(`
package main
import _ "unsafe"
//go:linkname __debug_modinfo__ runtime/debug.modinfo
var __debug_modinfo__ string
var __debug_modinfo__ = ""
func init() {
__debug_modinfo__ = %q
}
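
The comment above hides a subtle point: package main is allowed to shadow the predeclared identifier string, which would break a declaration written as var __debug_modinfo__ string; initializing with a string literal sidesteps the issue because the type is inferred from the literal. A contrived demonstration:

    package main

    import "fmt"

    type string = int // legal: shadows the predeclared identifier in this package

    var ok = "" // still the built-in string type, inferred from the literal

    func main() {
    	var n string = 3 // here 'string' now means int
    	fmt.Println(ok, n)
    }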

View file

@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"go/build"
"internal/goroot"
"os"
"path/filepath"
"strings"
@ -60,8 +61,8 @@ func Import(path string) (m module.Version, dir string, err error) {
if strings.HasPrefix(path, "golang_org/") {
return module.Version{}, filepath.Join(cfg.GOROOT, "src/vendor", path), nil
}
dir := filepath.Join(cfg.GOROOT, "src", path)
if _, err := os.Stat(dir); err == nil {
if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
dir := filepath.Join(cfg.GOROOT, "src", path)
return module.Version{}, dir, nil
}
}

View file

@ -45,7 +45,7 @@ func TestImport(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
for _, tt := range importTests {
t.Run(strings.Replace(tt.path, "/", "_", -1), func(t *testing.T) {
t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {
// Note that there is no build list, so Import should always fail.
m, dir, err := Import(tt.path)
if err == nil {

View file

@ -207,7 +207,7 @@ func matchSemverPrefix(p, v string) bool {
// If multiple modules with revisions matching the query provide the requested
// package, QueryPackage picks the one with the longest module path.
//
// If the path is in the the main module and the query is "latest",
// If the path is in the main module and the query is "latest",
// QueryPackage returns Target as the version.
func QueryPackage(path, query string, allowed func(module.Version) bool) (module.Version, *modfetch.RevInfo, error) {
if _, ok := dirInModule(path, Target.Path, ModRoot, true); ok {
@ -221,7 +221,7 @@ func QueryPackage(path, query string, allowed func(module.Version) bool) (module
}
finalErr := errMissing
for p := path; p != "."; p = pathpkg.Dir(p) {
for p := path; p != "." && p != "/"; p = pathpkg.Dir(p) {
info, err := Query(p, query, allowed)
if err != nil {
if _, ok := err.(*codehost.VCSError); ok {
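
The tightened loop condition above guards against a subtle non-termination: pathpkg here is the standard path package, and path.Dir bottoms out at "." for relative paths but at "/" for rooted ones, where Dir("/") == "/" never advances. A quick demonstration:

    package main

    import (
    	"fmt"
    	pathpkg "path"
    )

    func main() {
    	// Prints golang.org/x/text/unicode, golang.org/x/text,
    	// golang.org/x, golang.org, then stops at ".".
    	for p := "golang.org/x/text/unicode"; p != "." && p != "/"; p = pathpkg.Dir(p) {
    		fmt.Println(p)
    	}
    	// Without the p != "/" guard, a rooted path would spin forever:
    	fmt.Println(pathpkg.Dir("/")) // "/"
    }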

View file

@ -132,7 +132,7 @@ func TestQuery(t *testing.T) {
ok, _ := path.Match(allow, m.Version)
return ok
}
t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.query+"/"+allow, func(t *testing.T) {
t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+allow, func(t *testing.T) {
info, err := Query(tt.path, tt.query, allowed)
if tt.err != "" {
if err != nil && err.Error() == tt.err {

View file

@ -275,7 +275,7 @@ func MatchPattern(pattern string) func(name string) bool {
case strings.HasSuffix(re, `/\.\.\.`):
re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?`
}
re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1)
re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`)
reg := regexp.MustCompile(`^` + re + `$`)
@ -353,7 +353,7 @@ func CleanPatterns(patterns []string) []string {
// as a courtesy to Windows developers, rewrite \ to /
// in command-line arguments. Handles .\... and so on.
if filepath.Separator == '\\' {
a = strings.Replace(a, `\`, `/`, -1)
a = strings.ReplaceAll(a, `\`, `/`)
}
// Put argument in canonical form, but preserve leading ./.

View file

@ -99,7 +99,7 @@ and test commands:
link against shared libraries previously created with
-buildmode=shared.
-mod mode
module download mode to use: readonly, release, or vendor.
module download mode to use: readonly or vendor.
See 'go help modules' for more.
-pkgdir dir
install and load all packages from dir instead of the usual locations.
@ -398,10 +398,10 @@ func libname(args []string, pkgs []*load.Package) (string, error) {
arg = bp.ImportPath
}
}
appendName(strings.Replace(arg, "/", "-", -1))
appendName(strings.ReplaceAll(arg, "/", "-"))
} else {
for _, pkg := range pkgs {
appendName(strings.Replace(pkg.ImportPath, "/", "-", -1))
appendName(strings.ReplaceAll(pkg.ImportPath, "/", "-"))
}
}
} else if haveNonMeta { // have both meta package and a non-meta one

View file

@ -348,8 +348,12 @@ func (b *Builder) gccgoBuildIDELFFile(a *Action) (string, error) {
}
fmt.Fprintf(&buf, "\n")
if cfg.Goos != "solaris" {
fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",@progbits`+"\n")
fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",@progbits`+"\n")
secType := "@progbits"
if cfg.Goarch == "arm" {
secType = "%progbits"
}
fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",%s`+"\n", secType)
fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",%s`+"\n", secType)
}
if cfg.BuildN || cfg.BuildX {
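
Background on the secType switch above: on ARM, the GNU assembler reserves '@' as a comment character, so ELF section types there must be written with '%' (as in %progbits) rather than the '@progbits' spelling used on other architectures.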

View file

@ -1705,14 +1705,14 @@ func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string
if dir[len(dir)-1] == filepath.Separator {
dot += string(filepath.Separator)
}
cmd = strings.Replace(" "+cmd, " "+dir, dot, -1)[1:]
cmd = strings.ReplaceAll(" "+cmd, " "+dir, dot)[1:]
if b.scriptDir != dir {
b.scriptDir = dir
cmd = "cd " + dir + "\n" + cmd
}
}
if b.WorkDir != "" {
cmd = strings.Replace(cmd, b.WorkDir, "$WORK", -1)
cmd = strings.ReplaceAll(cmd, b.WorkDir, "$WORK")
}
return cmd
}
@ -1754,10 +1754,10 @@ func (b *Builder) showOutput(a *Action, dir, desc, out string) {
prefix := "# " + desc
suffix := "\n" + out
if reldir := base.ShortPath(dir); reldir != dir {
suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1)
suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1)
suffix = strings.ReplaceAll(suffix, " "+dir, " "+reldir)
suffix = strings.ReplaceAll(suffix, "\n"+dir, "\n"+reldir)
}
suffix = strings.Replace(suffix, " "+b.WorkDir, " $WORK", -1)
suffix = strings.ReplaceAll(suffix, " "+b.WorkDir, " $WORK")
if a != nil && a.output != nil {
a.output = append(a.output, prefix...)

Some files were not shown because too many files have changed in this diff