[dev.link] all: merge branch 'master' into dev.link

Clean merge.

Change-Id: Ib773b0bc00fd99d494f9331c3613bcc8285e48e3
Cherry Zhang 2020-09-11 12:07:44 -04:00
commit 3ab22052fb
427 changed files with 18808 additions and 14882 deletions

View file

@ -3,3 +3,17 @@ pkg unicode, var Chorasmian *RangeTable
pkg unicode, var Dives_Akuru *RangeTable pkg unicode, var Dives_Akuru *RangeTable
pkg unicode, var Khitan_Small_Script *RangeTable pkg unicode, var Khitan_Small_Script *RangeTable
pkg unicode, var Yezidi *RangeTable pkg unicode, var Yezidi *RangeTable
pkg text/template/parse, const NodeComment = 20
pkg text/template/parse, const NodeComment NodeType
pkg text/template/parse, const ParseComments = 1
pkg text/template/parse, const ParseComments Mode
pkg text/template/parse, method (*CommentNode) Copy() Node
pkg text/template/parse, method (*CommentNode) String() string
pkg text/template/parse, method (CommentNode) Position() Pos
pkg text/template/parse, method (CommentNode) Type() NodeType
pkg text/template/parse, type CommentNode struct
pkg text/template/parse, type CommentNode struct, Text string
pkg text/template/parse, type CommentNode struct, embedded NodeType
pkg text/template/parse, type CommentNode struct, embedded Pos
pkg text/template/parse, type Mode uint
pkg text/template/parse, type Tree struct, Mode Mode

View file

@ -687,6 +687,13 @@ MOVQ g(CX), AX // Move g into AX.
MOVQ g_m(AX), BX // Move g.m into BX. MOVQ g_m(AX), BX // Move g.m into BX.
</pre> </pre>
<p>
Register <code>BP</code> is callee-save.
The assembler automatically inserts <code>BP</code> save/restore when the frame size is larger than zero.
Using <code>BP</code> as a general-purpose register is allowed;
however, it can interfere with sampling-based profiling.
</p>
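<p>
A minimal sketch (the function below is hypothetical, not part of the
original text): with a non-zero frame size the assembler inserts the
<code>BP</code> save and restore automatically, so <code>BP</code> may be
used as a scratch register in the body, at the cost of possibly confusing
a sampling profiler that walks frame pointers while <code>BP</code> holds
an unrelated value.
</p>
<pre>
// func addTwo(a, b int64) int64 (hypothetical). The $8 frame size is
// non-zero, so the assembler saves and restores the caller's BP.
TEXT ·addTwo(SB), NOSPLIT, $8-24
	MOVQ a+0(FP), AX
	MOVQ b+8(FP), BP  // BP used as a general purpose register
	ADDQ BP, AX
	MOVQ AX, ret+16(FP)
	RET
</pre>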
<h3 id="arm">ARM</h3> <h3 id="arm">ARM</h3>
<p> <p>

View file

@ -609,6 +609,12 @@ Do not send CLs removing the interior tags from such phrases.
If a program needs to accept invalid numbers like the empty string, If a program needs to accept invalid numbers like the empty string,
consider wrapping the type with <a href="/pkg/encoding/json/#Unmarshaler"><code>Unmarshaler</code></a>. consider wrapping the type with <a href="/pkg/encoding/json/#Unmarshaler"><code>Unmarshaler</code></a>.
</p> </p>
<p><!-- CL 200237 -->
<a href="/pkg/encoding/json/#Unmarshal"><code>Unmarshal</code></a>
now supports map keys whose underlying type is string and which implement
<a href="/pkg/encoding/#TextUnmarshaler"><code>encoding.TextUnmarshaler</code></a>.
</p>
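<p>
A minimal sketch (the <code>Key</code> type is invented for illustration)
of such a map key:
</p>
<pre>
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Key has a string underlying type and implements encoding.TextUnmarshaler.
type Key string

func (k *Key) UnmarshalText(text []byte) error {
	*k = Key(strings.ToLower(string(text)))
	return nil
}

func main() {
	var m map[Key]int
	if err := json.Unmarshal([]byte(`{"FOO": 1, "Bar": 2}`), &amp;m); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(m) // map[bar:2 foo:1]
}
</pre>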
</dd> </dd>
</dl><!-- encoding/json --> </dl><!-- encoding/json -->

View file

@ -43,6 +43,37 @@ Do not send CLs removing the interior tags from such phrases.
<h3 id="go-command">Go command</h3> <h3 id="go-command">Go command</h3>
<p><!-- golang.org/issue/24031 -->
<code>retract</code> directives may now be used in a <code>go.mod</code> file
to indicate that certain published versions of the module should not be used
by other modules. A module author may retract a version after a severe problem
is discovered or if the version was published unintentionally.<br>
TODO: write and link to section in golang.org/ref/mod<br>
TODO: write and link to tutorial or blog post
</p>
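<p>
A hedged sketch of the directive (module path and versions are invented):
</p>
<pre>
module example.com/mymod

go 1.16

retract (
	v1.0.1            // published unintentionally
	[v1.1.0, v1.1.2]  // contains a severe problem
)
</pre>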
<p><!-- golang.org/issue/29062 -->
When using <code>go test</code>, a test that
calls <code>os.Exit(0)</code> during execution of a test function
will now be considered to fail.
This helps catch cases in which a test calls code that calls
<code>os.Exit(0)</code> and thereby stops running all future tests.
If a <code>TestMain</code> function calls <code>os.Exit(0)</code>,
that is still considered a passing test.
</p>
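<p>
For example, a test like the following (hypothetical) is now reported as a
failure instead of silently ending the test binary with a success status:
</p>
<pre>
package example

import (
	"os"
	"testing"
)

func TestEarlyExit(t *testing.T) {
	os.Exit(0) // go test now marks this test as failed
}
</pre>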
<h4 id="all-pattern">The <code>all</code> pattern</h4>
<p><!-- golang.org/cl/240623 -->
When the main module's <code>go.mod</code> file
declares <code>go</code> <code>1.16</code> or higher, the <code>all</code>
package pattern now matches only those packages that are transitively imported
by a package or test found in the main module. (Packages imported by <em>tests
of</em> packages imported by the main module are no longer included.) This is
the same set of packages retained
by <code>go</code> <code>mod</code> <code>vendor</code> since Go 1.11.
</p>
<p> <p>
TODO TODO
</p> </p>
@ -90,6 +121,28 @@ Do not send CLs removing the interior tags from such phrases.
TODO TODO
</p> </p>
<h3 id="net"><a href="/pkg/net/">net</a></h3>
<p><!-- CL 250357 -->
The case of I/O on a closed network connection, or I/O on a network
connection that is closed before any of the I/O completes, can now
be detected using the new <a href="/pkg/net/#ErrClosed">ErrClosed</a> error.
A typical use would be <code>errors.Is(err, net.ErrClosed)</code>.
In earlier releases the only way to reliably detect this case was to
match the string returned by the <code>Error</code> method
with <code>"use of closed network connection"</code>.
</p>
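<p>
A minimal sketch (the helper below is hypothetical) of the suggested check:
</p>
<pre>
package connutil

import (
	"errors"
	"log"
	"net"
)

// logReadError ignores errors caused by the connection having been closed,
// for example by a concurrent Close call, and logs everything else.
func logReadError(c net.Conn, err error) {
	if errors.Is(err, net.ErrClosed) {
		return
	}
	log.Printf("read from %v failed: %v", c.RemoteAddr(), err)
}
</pre>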
<h3 id="text/template/parse"><a href="/pkg/text/template/parse/">text/template/parse</a></h3>
<p><!-- CL 229398, golang.org/issue/34652 -->
A new <a href="/pkg/text/template/parse/#CommentNode"><code>CommentNode</code></a>
was added to the parse tree. The <a href="/pkg/text/template/parse/#Mode"><code>Mode</code></a>
field in <code>parse.Tree</code> enables access to it.
</p>
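<p>
A minimal sketch of retaining comments with the new mode bit:
</p>
<pre>
package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	tr := parse.New("demo")
	tr.Mode = parse.ParseComments // keep comments in the parse tree
	treeSet := make(map[string]*parse.Tree)
	if _, err := tr.Parse("{{/* a comment */}}hello", "", "", treeSet); err != nil {
		panic(err)
	}
	for _, n := range tr.Root.Nodes {
		if c, ok := n.(*parse.CommentNode); ok {
			fmt.Printf("%q\n", c.Text) // "/* a comment */"
		}
	}
}
</pre>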
<!-- text/template/parse -->
<h3 id="unicode"><a href="/pkg/unicode/">unicode</a></h3> <h3 id="unicode"><a href="/pkg/unicode/">unicode</a></h3>
<p><!-- CL 248765 --> <p><!-- CL 248765 -->
@ -112,3 +165,27 @@ Do not send CLs removing the interior tags from such phrases.
<p> <p>
TODO TODO
</p> </p>
<dl id="net/http"><dt><a href="/pkg/net/http/">net/http</a></dt>
<dd>
<p><!-- CL 233637 -->
In the <a href="/pkg/net/http/"><code>net/http</code></a> package, the
behavior of <a href="/pkg/net/http/#StripPrefix"><code>StripPrefix</code></a>
has been changed to strip the prefix from the request URL's
<code>RawPath</code> field in addition to its <code>Path</code> field.
In past releases, only the <code>Path</code> field was trimmed, so a
request URL containing escaped characters would end up with mismatched
<code>Path</code> and <code>RawPath</code> fields.
In Go 1.16, <code>StripPrefix</code> trims both fields.
If the prefix portion of the request URL contains escaped characters, the
handler now serves a 404 instead of invoking the underlying handler with a
mismatched <code>Path</code>/<code>RawPath</code> pair.
</p>
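<p>
A minimal sketch (the prefix and handler are invented) showing the two
fields staying consistent after the change:
</p>
<pre>
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.StripPrefix("/files/", http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			// In Go 1.16 both fields have the prefix removed: a request
			// for /files/a%2Fb sees Path "a/b" and RawPath "a%2Fb".
			fmt.Fprintf(w, "Path=%q RawPath=%q\n", r.URL.Path, r.URL.RawPath)
		}))
	http.Handle("/files/", h)
	// http.ListenAndServe(":8080", nil) would serve it; omitted here.
}
</pre>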
<p><!-- CL 252497 -->
The <a href="/pkg/net/http/"><code>net/http</code></a> package now rejects HTTP range requests
of the form <code>"Range": "bytes=--N"</code> where <code>"-N"</code> is a negative suffix length, for
example <code>"Range": "bytes=--2"</code>. It now replies with a <code>416 "Range Not Satisfiable"</code> response.
</p>
</dd>
</dl><!-- net/http -->

View file

@ -600,6 +600,9 @@ The valid combinations of <code>$GOOS</code> and <code>$GOARCH</code> are:
<td></td><td><code>linux</code></td> <td><code>mips64le</code></td> <td></td><td><code>linux</code></td> <td><code>mips64le</code></td>
</tr> </tr>
<tr> <tr>
<td></td><td><code>linux</code></td> <td><code>riscv64</code></td>
</tr>
<tr>
<td></td><td><code>linux</code></td> <td><code>s390x</code></td> <td></td><td><code>linux</code></td> <td><code>s390x</code></td>
</tr> </tr>
<tr> <tr>

View file

@ -319,6 +319,7 @@ typedef enum {
// issue 4339 // issue 4339
// We've historically permitted #include <>, so test it here. Issue 29333. // We've historically permitted #include <>, so test it here. Issue 29333.
// Also see issue 41059.
#include <issue4339.h> #include <issue4339.h>
// issue 4417 // issue 4417

View file

@ -11,6 +11,7 @@
// - Node.js // - Node.js
// - Electron // - Electron
// - Parcel // - Parcel
// - Webpack
if (typeof global !== "undefined") { if (typeof global !== "undefined") {
// global already exists // global already exists
@ -28,7 +29,7 @@
if (!global.fs && global.require) { if (!global.fs && global.require) {
const fs = require("fs"); const fs = require("fs");
if (Object.keys(fs) !== 0) { if (typeof fs === "object" && fs !== null && Object.keys(fs).length !== 0) {
global.fs = fs; global.fs = fs;
} }
} }
@ -556,6 +557,7 @@
} }
if ( if (
typeof module !== "undefined" &&
global.require && global.require &&
global.require.main === module && global.require.main === module &&
global.process && global.process &&

View file

@ -145,6 +145,37 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VZIP2 V10.D2, V13.D2, V3.D2 // a379ca4e VZIP2 V10.D2, V13.D2, V3.D2 // a379ca4e
VZIP1 V17.S2, V4.S2, V26.S2 // 9a38910e VZIP1 V17.S2, V4.S2, V26.S2 // 9a38910e
VZIP2 V25.S2, V14.S2, V25.S2 // d979990e VZIP2 V25.S2, V14.S2, V25.S2 // d979990e
VUXTL V30.B8, V30.H8 // dea7082f
VUXTL V30.H4, V29.S4 // dda7102f
VUXTL V29.S2, V2.D2 // a2a7202f
VUXTL2 V30.H8, V30.S4 // dea7106f
VUXTL2 V29.S4, V2.D2 // a2a7206f
VUXTL2 V30.B16, V2.H8 // c2a7086f
VBIT V21.B16, V25.B16, V4.B16 // 241fb56e
VBSL V23.B16, V3.B16, V7.B16 // 671c776e
VCMTST V2.B8, V29.B8, V2.B8 // a28f220e
VCMTST V2.D2, V23.D2, V3.D2 // e38ee24e
VSUB V2.B8, V30.B8, V30.B8 // de87222e
VUZP1 V0.B8, V30.B8, V1.B8 // c11b000e
VUZP1 V1.B16, V29.B16, V2.B16 // a21b014e
VUZP1 V2.H4, V28.H4, V3.H4 // 831b420e
VUZP1 V3.H8, V27.H8, V4.H8 // 641b434e
VUZP1 V28.S2, V2.S2, V5.S2 // 45189c0e
VUZP1 V29.S4, V1.S4, V6.S4 // 26189d4e
VUZP1 V30.D2, V0.D2, V7.D2 // 0718de4e
VUZP2 V0.D2, V30.D2, V1.D2 // c15bc04e
VUZP2 V30.D2, V0.D2, V29.D2 // 1d58de4e
VUSHLL $0, V30.B8, V30.H8 // dea7082f
VUSHLL $0, V30.H4, V29.S4 // dda7102f
VUSHLL $0, V29.S2, V2.D2 // a2a7202f
VUSHLL2 $0, V30.B16, V2.H8 // c2a7086f
VUSHLL2 $0, V30.H8, V30.S4 // dea7106f
VUSHLL2 $0, V29.S4, V2.D2 // a2a7206f
VUSHLL $7, V30.B8, V30.H8 // dea70f2f
VUSHLL $15, V30.H4, V29.S4 // dda71f2f
VUSHLL2 $31, V30.S4, V2.D2 // c2a73f6f
VBIF V0.B8, V30.B8, V1.B8 // c11fe02e
VBIF V30.B16, V0.B16, V2.B16 // 021cfe6e
MOVD (R2)(R6.SXTW), R4 // 44c866f8 MOVD (R2)(R6.SXTW), R4 // 44c866f8
MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8 MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8
MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8 MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8
@ -186,6 +217,10 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS $(0.96875), F3 // 03f02d1e FMOVS $(0.96875), F3 // 03f02d1e
FMOVD $(28.0), F4 // 0490671e FMOVD $(28.0), F4 // 0490671e
// move a large constant to a Vd.
FMOVD $0x8040201008040201, V20 // FMOVD $-9205322385119247871, V20
FMOVQ $0x8040201008040202, V29 // FMOVQ $-9205322385119247870, V29
FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc
FMOVS (R2)(R6<<2), F4 // 447866bc FMOVS (R2)(R6<<2), F4 // 447866bc
FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc
@ -359,18 +394,22 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VLD4 (R15), [V10.H4, V11.H4, V12.H4, V13.H4] // ea05400c VLD4 (R15), [V10.H4, V11.H4, V12.H4, V13.H4] // ea05400c
VLD4.P 32(R24), [V31.B8, V0.B8, V1.B8, V2.B8] // 1f03df0c VLD4.P 32(R24), [V31.B8, V0.B8, V1.B8, V2.B8] // 1f03df0c
VLD4.P (R13)(R9), [V14.S2, V15.S2, V16.S2, V17.S2] // VLD4.P (R13)(R9*1), [V14.S2,V15.S2,V16.S2,V17.S2] // ae09c90c VLD4.P (R13)(R9), [V14.S2, V15.S2, V16.S2, V17.S2] // VLD4.P (R13)(R9*1), [V14.S2,V15.S2,V16.S2,V17.S2] // ae09c90c
VLD1R (R0), [V0.B16] // 00c0404d VLD1R (R1), [V9.B8] // 29c0400d
VLD1R.P 16(R0), [V0.B16] // 00c0df4d VLD1R.P (R1), [V9.B8] // 29c0df0d
VLD1R.P (R15)(R1), [V15.H4] // VLD1R.P (R15)(R1*1), [V15.H4] // efc5c10d VLD1R.P 1(R1), [V2.B8] // 22c0df0d
VLD2R (R15), [V15.H4, V16.H4] // efc5600d VLD1R.P 2(R1), [V2.H4] // 22c4df0d
VLD2R.P 32(R0), [V0.D2, V1.D2] // 00ccff4d VLD1R (R0), [V0.B16] // 00c0404d
VLD2R.P (R0)(R5), [V31.D1, V0.D1] // VLD2R.P (R0)(R5*1), [V31.D1, V0.D1] // 1fcce50d VLD1R.P (R0), [V0.B16] // 00c0df4d
VLD3R (RSP), [V31.S2, V0.S2, V1.S2] // ffeb400d VLD1R.P (R15)(R1), [V15.H4] // VLD1R.P (R15)(R1*1), [V15.H4] // efc5c10d
VLD3R.P 24(R15), [V15.H4, V16.H4, V17.H4] // efe5df0d VLD2R (R15), [V15.H4, V16.H4] // efc5600d
VLD3R.P (R15)(R6), [V15.H8, V16.H8, V17.H8] // VLD3R.P (R15)(R6*1), [V15.H8, V16.H8, V17.H8] // efe5c64d VLD2R.P 16(R0), [V0.D2, V1.D2] // 00ccff4d
VLD4R (R0), [V0.B8, V1.B8, V2.B8, V3.B8] // 00e0600d VLD2R.P (R0)(R5), [V31.D1, V0.D1] // VLD2R.P (R0)(R5*1), [V31.D1, V0.D1] // 1fcce50d
VLD4R.P 64(RSP), [V31.S4, V0.S4, V1.S4, V2.S4] // ffebff4d VLD3R (RSP), [V31.S2, V0.S2, V1.S2] // ffeb400d
VLD4R.P (R15)(R9), [V15.H4, V16.H4, V17.H4, V18.H4] // VLD4R.P (R15)(R9*1), [V15.H4, V16.H4, V17.H4, V18.H4] // efe5e90d VLD3R.P 6(R15), [V15.H4, V16.H4, V17.H4] // efe5df0d
VLD3R.P (R15)(R6), [V15.H8, V16.H8, V17.H8] // VLD3R.P (R15)(R6*1), [V15.H8, V16.H8, V17.H8] // efe5c64d
VLD4R (R0), [V0.B8, V1.B8, V2.B8, V3.B8] // 00e0600d
VLD4R.P 16(RSP), [V31.S4, V0.S4, V1.S4, V2.S4] // ffebff4d
VLD4R.P (R15)(R9), [V15.H4, V16.H4, V17.H4, V18.H4] // VLD4R.P (R15)(R9*1), [V15.H4, V16.H4, V17.H4, V18.H4] // efe5e90d
VST1.P [V24.S2], 8(R2) // 58789f0c VST1.P [V24.S2], 8(R2) // 58789f0c
VST1 [V29.S2, V30.S2], (R29) // bdab000c VST1 [V29.S2, V30.S2], (R29) // bdab000c
VST1 [V14.H4, V15.H4, V16.H4], (R27) // 6e67000c VST1 [V14.H4, V15.H4, V16.H4], (R27) // 6e67000c

View file

@ -591,7 +591,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
FMOVS R8, F15 // 0f01271e FMOVS R8, F15 // 0f01271e
FMOVD F2, F9 // 4940601e FMOVD F2, F9 // 4940601e
FMOVS F4, F27 // 9b40201e FMOVS F4, F27 // 9b40201e
//TODO VFMOV $3.125, V8.2D // 28f5006f //TODO VFMOV $3.125, V8.D2 // 28f5006f
FMSUBS F13, F21, F13, F19 // b3d50d1f FMSUBS F13, F21, F13, F19 // b3d50d1f
FMSUBD F11, F7, F15, F31 // ff9d4b1f FMSUBD F11, F7, F15, F31 // ff9d4b1f
//TODO VFMUL V9.S[2], F21, F19 // b39a895f //TODO VFMUL V9.S[2], F21, F19 // b39a895f
@ -648,7 +648,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
FSUBS F25, F23, F0 // e03a391e FSUBS F25, F23, F0 // e03a391e
FSUBD F11, F13, F24 // b8396b1e FSUBD F11, F13, F24 // b8396b1e
//TODO SCVTFSS F30, F20 // d4db215e //TODO SCVTFSS F30, F20 // d4db215e
//TODO VSCVTF V7.2S, V17.2S // f1d8210e //TODO VSCVTF V7.S2, V17.S2 // f1d8210e
SCVTFWS R3, F16 // 7000221e SCVTFWS R3, F16 // 7000221e
SCVTFWD R20, F4 // 8402621e SCVTFWD R20, F4 // 8402621e
SCVTFS R16, F12 // 0c02229e SCVTFS R16, F12 // 0c02229e

View file

@ -339,4 +339,18 @@ TEXT errors(SB),$0
MRS ICV_EOIR1_EL1, R3 // ERROR "system register is not readable" MRS ICV_EOIR1_EL1, R3 // ERROR "system register is not readable"
MRS PMSWINC_EL0, R3 // ERROR "system register is not readable" MRS PMSWINC_EL0, R3 // ERROR "system register is not readable"
MRS OSLAR_EL1, R3 // ERROR "system register is not readable" MRS OSLAR_EL1, R3 // ERROR "system register is not readable"
VLD3R.P 24(R15), [V15.H4,V16.H4,V17.H4] // ERROR "invalid post-increment offset"
VBIT V1.H4, V12.H4, V3.H4 // ERROR "invalid arrangement"
VBSL V1.D2, V12.D2, V3.D2 // ERROR "invalid arrangement"
VUXTL V30.D2, V30.H8 // ERROR "operand mismatch"
VUXTL2 V20.B8, V21.H8 // ERROR "operand mismatch"
VUXTL V3.D2, V4.B8 // ERROR "operand mismatch"
VUZP1 V0.B8, V30.B8, V1.B16 // ERROR "operand mismatch"
VUZP2 V0.Q1, V30.Q1, V1.Q1 // ERROR "invalid arrangement"
VUSHLL $0, V30.D2, V30.H8 // ERROR "operand mismatch"
VUSHLL2 $0, V20.B8, V21.H8 // ERROR "operand mismatch"
VUSHLL $8, V30.B8, V30.H8 // ERROR "shift amount out of range"
VUSHLL2 $32, V30.S4, V2.D2 // ERROR "shift amount out of range"
VBIF V0.B8, V1.B8, V2.B16 // ERROR "operand mismatch"
VBIF V0.D2, V1.D2, V2.D2 // ERROR "invalid arrangement"
RET RET

View file

@ -112,6 +112,13 @@ The default C and C++ compilers may be changed by the CC and CXX
environment variables, respectively; those environment variables environment variables, respectively; those environment variables
may include command line options. may include command line options.
The cgo tool will always invoke the C compiler with the source file's
directory in the include path; i.e. -I${SRCDIR} is always implied. This
means that if a header file foo/bar.h exists both in the source
directory and also in the system include directory (or some other place
specified by a -I flag), then "#include <foo/bar.h>" will always find the
local version in preference to any other version.
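For illustration (a hypothetical layout; assumes the package directory
contains a header foo/bar.h, say with the single line "#define BAR 42"),
the implied -I${SRCDIR} makes the local copy win:

    package main

    /*
    #include <foo/bar.h>
    */
    import "C"

    func main() {
        println(C.BAR) // prints 42, taken from the local foo/bar.h
    }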
The cgo tool is enabled by default for native builds on systems where The cgo tool is enabled by default for native builds on systems where
it is expected to work. It is disabled by default when it is expected to work. It is disabled by default when
cross-compiling. You can control this by setting the CGO_ENABLED cross-compiling. You can control this by setting the CGO_ENABLED

View file

@ -369,7 +369,18 @@ func (p *Package) guessKinds(f *File) []*Name {
fmt.Fprintf(&b, "#line 1 \"completed\"\n"+ fmt.Fprintf(&b, "#line 1 \"completed\"\n"+
"int __cgo__1 = __cgo__2;\n") "int __cgo__1 = __cgo__2;\n")
stderr := p.gccErrors(b.Bytes()) // We need to parse the output from this gcc command, so ensure that it
// doesn't have any ANSI escape sequences in it. (TERM=dumb is
// insufficient; if the user specifies CGO_CFLAGS=-fdiagnostics-color,
// GCC will ignore TERM, and GCC can also be configured at compile-time
// to ignore TERM.)
stderr := p.gccErrors(b.Bytes(), "-fdiagnostics-color=never")
if strings.Contains(stderr, "unrecognized command line option") {
// We're using an old version of GCC that doesn't understand
// -fdiagnostics-color. Those versions can't print color anyway,
// so just rerun without that option.
stderr = p.gccErrors(b.Bytes())
}
if stderr == "" { if stderr == "" {
fatalf("%s produced no output\non input:\n%s", p.gccBaseCmd()[0], b.Bytes()) fatalf("%s produced no output\non input:\n%s", p.gccBaseCmd()[0], b.Bytes())
} }
@ -1970,22 +1981,25 @@ func (p *Package) gccDefines(stdin []byte) string {
// gccErrors runs gcc over the C program stdin and returns // gccErrors runs gcc over the C program stdin and returns
// the errors that gcc prints. That is, this function expects // the errors that gcc prints. That is, this function expects
// gcc to fail. // gcc to fail.
func (p *Package) gccErrors(stdin []byte) string { func (p *Package) gccErrors(stdin []byte, extraArgs ...string) string {
// TODO(rsc): require failure // TODO(rsc): require failure
args := p.gccCmd() args := p.gccCmd()
// Optimization options can confuse the error messages; remove them. // Optimization options can confuse the error messages; remove them.
nargs := make([]string, 0, len(args)) nargs := make([]string, 0, len(args)+len(extraArgs))
for _, arg := range args { for _, arg := range args {
if !strings.HasPrefix(arg, "-O") { if !strings.HasPrefix(arg, "-O") {
nargs = append(nargs, arg) nargs = append(nargs, arg)
} }
} }
// Force -O0 optimization but keep the trailing "-" at the end. // Force -O0 optimization and append extra arguments, but keep the
nargs = append(nargs, "-O0") // trailing "-" at the end.
nl := len(nargs) li := len(nargs) - 1
nargs[nl-2], nargs[nl-1] = nargs[nl-1], nargs[nl-2] last := nargs[li]
nargs[li] = "-O0"
nargs = append(nargs, extraArgs...)
nargs = append(nargs, last)
if *debugGcc { if *debugGcc {
fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(nargs, " ")) fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(nargs, " "))

View file

@ -319,8 +319,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// TODO(khr): issue only the -1 fixup code we need. // TODO(khr): issue only the -1 fixup code we need.
// For instance, if only the quotient is used, no point in zeroing the remainder. // For instance, if only the quotient is used, no point in zeroing the remainder.
j1.To.Val = n1 j1.To.SetTarget(n1)
j2.To.Val = s.Pc() j2.To.SetTarget(s.Pc())
} }
case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU: case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU:

View file

@ -816,7 +816,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} }
p := s.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = condBits[v.Aux.(ssa.Op)] p.From.Reg = condBits[ssa.Op(v.AuxInt)]
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1}) p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1})
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG

View file

@ -429,8 +429,7 @@ func hashfor(t *types.Type) *Node {
} }
n := newname(sym) n := newname(sym)
n.SetClass(PFUNC) setNodeNameFunc(n)
n.Sym.SetFunc(true)
n.Type = functype(nil, []*Node{ n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)),
anonfield(types.Types[TUINTPTR]), anonfield(types.Types[TUINTPTR]),
@ -646,17 +645,11 @@ func geneq(t *types.Type) *obj.LSym {
// Build a list of conditions to satisfy. // Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable // The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order. // within each inner list. The outer lists must be evaluated in order.
// Even within each inner list, track their order so that we can preserve var conds [][]*Node
// aspects of that order. (TODO: latter part needed?) conds = append(conds, []*Node{})
type nodeIdx struct {
n *Node
idx int
}
var conds [][]nodeIdx
conds = append(conds, []nodeIdx{})
and := func(n *Node) { and := func(n *Node) {
i := len(conds) - 1 i := len(conds) - 1
conds[i] = append(conds[i], nodeIdx{n: n, idx: len(conds[i])}) conds[i] = append(conds[i], n)
} }
// Walk the struct using memequal for runs of AMEM // Walk the struct using memequal for runs of AMEM
@ -674,7 +667,7 @@ func geneq(t *types.Type) *obj.LSym {
if !IsRegularMemory(f.Type) { if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) { if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions. // Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []nodeIdx{}) conds = append(conds, []*Node{})
} }
p := nodSym(OXDOT, np, f.Sym) p := nodSym(OXDOT, np, f.Sym)
q := nodSym(OXDOT, nq, f.Sym) q := nodSym(OXDOT, nq, f.Sym)
@ -688,7 +681,7 @@ func geneq(t *types.Type) *obj.LSym {
} }
if EqCanPanic(f.Type) { if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic. // Also enforce ordering after something that can panic.
conds = append(conds, []nodeIdx{}) conds = append(conds, []*Node{})
} }
i++ i++
continue continue
@ -713,14 +706,13 @@ func geneq(t *types.Type) *obj.LSym {
// Sort conditions to put runtime calls last. // Sort conditions to put runtime calls last.
// Preserve the rest of the ordering. // Preserve the rest of the ordering.
var flatConds []nodeIdx var flatConds []*Node
for _, c := range conds { for _, c := range conds {
isCall := func(n *Node) bool {
return n.Op == OCALL || n.Op == OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool { sort.SliceStable(c, func(i, j int) bool {
x, y := c[i], c[j] return !isCall(c[i]) && isCall(c[j])
if (x.n.Op != OCALL) == (y.n.Op != OCALL) {
return x.idx < y.idx
}
return x.n.Op != OCALL
}) })
flatConds = append(flatConds, c...) flatConds = append(flatConds, c...)
} }
@ -729,9 +721,9 @@ func geneq(t *types.Type) *obj.LSym {
if len(flatConds) == 0 { if len(flatConds) == 0 {
cond = nodbool(true) cond = nodbool(true)
} else { } else {
cond = flatConds[0].n cond = flatConds[0]
for _, c := range flatConds[1:] { for _, c := range flatConds[1:] {
cond = nod(OANDAND, cond, c.n) cond = nod(OANDAND, cond, c)
} }
} }

View file

@ -107,18 +107,7 @@ func typecheckclosure(clo *Node, top int) {
} }
xfunc.Func.Nname.Sym = closurename(Curfn) xfunc.Func.Nname.Sym = closurename(Curfn)
disableExport(xfunc.Func.Nname.Sym) setNodeNameFunc(xfunc.Func.Nname)
if xfunc.Func.Nname.Sym.Def != nil {
// The only case we can reach here is when the outer function was redeclared.
// In that case, don't bother to redeclare the closure. Otherwise, we will get
// a spurious error message, see #17758. While we are here, double check that
// we already reported other error.
if nsavederrors+nerrors == 0 {
Fatalf("unexpected symbol collision %v", xfunc.Func.Nname.Sym)
}
} else {
declare(xfunc.Func.Nname, PFUNC)
}
xfunc = typecheck(xfunc, ctxStmt) xfunc = typecheck(xfunc, ctxStmt)
// Type check the body now, but only if we're inside a function. // Type check the body now, but only if we're inside a function.
@ -473,7 +462,6 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
tfn.List.Set(structargs(t0.Params(), true)) tfn.List.Set(structargs(t0.Params(), true))
tfn.Rlist.Set(structargs(t0.Results(), false)) tfn.Rlist.Set(structargs(t0.Results(), false))
disableExport(sym)
xfunc := dclfunc(sym, tfn) xfunc := dclfunc(sym, tfn)
xfunc.Func.SetDupok(true) xfunc.Func.SetDupok(true)
xfunc.Func.SetNeedctxt(true) xfunc.Func.SetNeedctxt(true)

View file

@ -90,7 +90,7 @@ func declare(n *Node, ctxt Class) {
lineno = n.Pos lineno = n.Pos
Fatalf("automatic outside function") Fatalf("automatic outside function")
} }
if Curfn != nil { if Curfn != nil && ctxt != PFUNC {
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
} }
if n.Op == OTYPE { if n.Op == OTYPE {
@ -297,6 +297,16 @@ func oldname(s *types.Sym) *Node {
return n return n
} }
// importName is like oldname, but it reports an error if sym is from another package and not exported.
func importName(sym *types.Sym) *Node {
n := oldname(sym)
if !types.IsExported(sym.Name) && sym.Pkg != localpkg {
n.SetDiag(true)
yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
}
return n
}
// := declarations // := declarations
func colasname(n *Node) bool { func colasname(n *Node) bool {
switch n.Op { switch n.Op {
@ -975,10 +985,14 @@ func makefuncsym(s *types.Sym) {
} }
} }
// disableExport prevents sym from being included in package export // setNodeNameFunc marks a node as a function.
// data. To be effectual, it must be called before declare. func setNodeNameFunc(n *Node) {
func disableExport(sym *types.Sym) { if n.Op != ONAME || n.Class() != Pxxx {
sym.SetOnExportList(true) Fatalf("expected ONAME/Pxxx node, got %v", n)
}
n.SetClass(PFUNC)
n.Sym.SetFunc(true)
} }
func dclfunc(sym *types.Sym, tfn *Node) *Node { func dclfunc(sym *types.Sym, tfn *Node) *Node {
@ -990,7 +1004,7 @@ func dclfunc(sym *types.Sym, tfn *Node) *Node {
fn.Func.Nname = newfuncnamel(lineno, sym) fn.Func.Nname = newfuncnamel(lineno, sym)
fn.Func.Nname.Name.Defn = fn fn.Func.Nname.Name.Defn = fn
fn.Func.Nname.Name.Param.Ntype = tfn fn.Func.Nname.Name.Param.Ntype = tfn
declare(fn.Func.Nname, PFUNC) setNodeNameFunc(fn.Func.Nname)
funchdr(fn) funchdr(fn)
fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType) fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType)
return fn return fn

View file

@ -377,14 +377,14 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
// This really doesn't have much to do with escape analysis per se, // This really doesn't have much to do with escape analysis per se,
// but we are reusing the ability to annotate an individual function // but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code. // argument and pass those annotations along to importing code.
if f.Type.Etype == TUINTPTR { if f.Type.IsUintptr() {
if Debug['m'] != 0 { if Debug['m'] != 0 {
Warnl(f.Pos, "assuming %v is unsafe uintptr", name()) Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
} }
return unsafeUintptrTag return unsafeUintptrTag
} }
if !types.Haspointers(f.Type) { // don't bother tagging for scalars if !f.Type.HasPointers() { // don't bother tagging for scalars
return "" return ""
} }
@ -407,13 +407,13 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
} }
if fn.Func.Pragma&UintptrEscapes != 0 { if fn.Func.Pragma&UintptrEscapes != 0 {
if f.Type.Etype == TUINTPTR { if f.Type.IsUintptr() {
if Debug['m'] != 0 { if Debug['m'] != 0 {
Warnl(f.Pos, "marking %v as escaping uintptr", name()) Warnl(f.Pos, "marking %v as escaping uintptr", name())
} }
return uintptrEscapesTag return uintptrEscapesTag
} }
if f.IsDDD() && f.Type.Elem().Etype == TUINTPTR { if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr. // final argument is ...uintptr.
if Debug['m'] != 0 { if Debug['m'] != 0 {
Warnl(f.Pos, "marking %v as escaping ...uintptr", name()) Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
@ -422,7 +422,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
} }
} }
if !types.Haspointers(f.Type) { // don't bother tagging for scalars if !f.Type.HasPointers() { // don't bother tagging for scalars
return "" return ""
} }

View file

@ -326,7 +326,7 @@ func (e *Escape) stmt(n *Node) {
if typesw && n.Left.Left != nil { if typesw && n.Left.Left != nil {
cv := cas.Rlist.First() cv := cas.Rlist.First()
k := e.dcl(cv) // type switch variables have no ODCL. k := e.dcl(cv) // type switch variables have no ODCL.
if types.Haspointers(cv.Type) { if cv.Type.HasPointers() {
ks = append(ks, k.dotType(cv.Type, cas, "switch case")) ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
} }
} }
@ -433,7 +433,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() { if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
// nop // nop
} else if k.derefs >= 0 && !types.Haspointers(n.Type) { } else if k.derefs >= 0 && !n.Type.HasPointers() {
k = e.discardHole() k = e.discardHole()
} }
@ -485,7 +485,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
e.discard(max) e.discard(max)
case OCONV, OCONVNOP: case OCONV, OCONVNOP:
if checkPtr(e.curfn, 2) && n.Type.Etype == TUNSAFEPTR && n.Left.Type.IsPtr() { if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
// When -d=checkptr=2 is enabled, treat // When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an // conversions to unsafe.Pointer as an
// escaping operation. This allows better // escaping operation. This allows better
@ -493,7 +493,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
// easily detect object boundaries on the heap // easily detect object boundaries on the heap
// than the stack. // than the stack.
e.assignHeap(n.Left, "conversion to unsafe.Pointer", n) e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
} else if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR { } else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
e.unsafeValue(k, n.Left) e.unsafeValue(k, n.Left)
} else { } else {
e.expr(k, n.Left) e.expr(k, n.Left)
@ -625,7 +625,7 @@ func (e *Escape) unsafeValue(k EscHole, n *Node) {
switch n.Op { switch n.Op {
case OCONV, OCONVNOP: case OCONV, OCONVNOP:
if n.Left.Type.Etype == TUNSAFEPTR { if n.Left.Type.IsUnsafePtr() {
e.expr(k, n.Left) e.expr(k, n.Left)
} else { } else {
e.discard(n.Left) e.discard(n.Left)
@ -698,7 +698,7 @@ func (e *Escape) addr(n *Node) EscHole {
e.assignHeap(n.Right, "key of map put", n) e.assignHeap(n.Right, "key of map put", n)
} }
if !types.Haspointers(n.Type) { if !n.Type.HasPointers() {
k = e.discardHole() k = e.discardHole()
} }
@ -811,14 +811,14 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
// slice might be allocated, and all slice elements // slice might be allocated, and all slice elements
// might flow to heap. // might flow to heap.
appendeeK := ks[0] appendeeK := ks[0]
if types.Haspointers(args[0].Type.Elem()) { if args[0].Type.Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice")) appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
} }
argument(appendeeK, args[0]) argument(appendeeK, args[0])
if call.IsDDD() { if call.IsDDD() {
appendedK := e.discardHole() appendedK := e.discardHole()
if args[1].Type.IsSlice() && types.Haspointers(args[1].Type.Elem()) { if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...") appendedK = e.heapHole().deref(call, "appended slice...")
} }
argument(appendedK, args[1]) argument(appendedK, args[1])
@ -832,7 +832,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
argument(e.discardHole(), call.Left) argument(e.discardHole(), call.Left)
copiedK := e.discardHole() copiedK := e.discardHole()
if call.Right.Type.IsSlice() && types.Haspointers(call.Right.Type.Elem()) { if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice") copiedK = e.heapHole().deref(call, "copied slice")
} }
argument(copiedK, call.Right) argument(copiedK, call.Right)
@ -1029,6 +1029,9 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
if e.curfn == nil { if e.curfn == nil {
Fatalf("e.curfn isn't set") Fatalf("e.curfn isn't set")
} }
if n != nil && n.Type != nil && n.Type.NotInHeap() {
yyerrorl(n.Pos, "%v is go:notinheap; stack allocation disallowed", n.Type)
}
n = canonicalNode(n) n = canonicalNode(n)
loc := &EscLocation{ loc := &EscLocation{

View file

@ -1616,7 +1616,8 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
} }
n1.exprfmt(s, nprec, mode) n1.exprfmt(s, nprec, mode)
} }
case ODDD:
mode.Fprintf(s, "...")
default: default:
mode.Fprintf(s, "<node %v>", n.Op) mode.Fprintf(s, "<node %v>", n.Op)
} }

View file

@ -32,7 +32,6 @@ package gc
import ( import (
"cmd/compile/internal/ssa" "cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/objabi" "cmd/internal/objabi"
"cmd/internal/src" "cmd/internal/src"
@ -316,7 +315,7 @@ func ggloblnod(nam *Node) {
if nam.Name.Readonly() { if nam.Name.Readonly() {
flags = obj.RODATA flags = obj.RODATA
} }
if nam.Type != nil && !types.Haspointers(nam.Type) { if nam.Type != nil && !nam.Type.HasPointers() {
flags |= obj.NOPTR flags |= obj.NOPTR
} }
Ctxt.Globl(s, nam.Type.Width, flags) Ctxt.Globl(s, nam.Type.Width, flags)
@ -343,6 +342,6 @@ func Patch(p *obj.Prog, to *obj.Prog) {
if p.To.Type != obj.TYPE_BRANCH { if p.To.Type != obj.TYPE_BRANCH {
Fatalf("patch: not a branch") Fatalf("patch: not a branch")
} }
p.To.Val = to p.To.SetTarget(to)
p.To.Offset = to.Pc p.To.Offset = to.Pc
} }

View file

@ -45,7 +45,6 @@ func fninit(n []*Node) {
if len(nf) > 0 { if len(nf) > 0 {
lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
initializers := lookup("init") initializers := lookup("init")
disableExport(initializers)
fn := dclfunc(initializers, nod(OTFUNC, nil, nil)) fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
for _, dcl := range dummyInitFn.Func.Dcl { for _, dcl := range dummyInitFn.Func.Dcl {
dcl.Name.Curfn = fn dcl.Name.Curfn = fn

View file

@ -653,7 +653,7 @@ func (p *noder) expr(expr syntax.Expr) *Node {
obj := p.expr(expr.X) obj := p.expr(expr.X)
if obj.Op == OPACK { if obj.Op == OPACK {
obj.Name.SetUsed(true) obj.Name.SetUsed(true)
return oldname(restrictlookup(expr.Sel.Value, obj.Name.Pkg)) return importName(obj.Name.Pkg.Lookup(expr.Sel.Value))
} }
n := nodSym(OXDOT, obj, p.name(expr.Sel)) n := nodSym(OXDOT, obj, p.name(expr.Sel))
n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X) n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X)
@ -857,7 +857,7 @@ func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node {
p.setlineno(method) p.setlineno(method)
var n *Node var n *Node
if method.Name == nil { if method.Name == nil {
n = p.nodSym(method, ODCLFIELD, oldname(p.packname(method.Type)), nil) n = p.nodSym(method, ODCLFIELD, importName(p.packname(method.Type)), nil)
} else { } else {
mname := p.name(method.Name) mname := p.name(method.Name)
sig := p.typeExpr(method.Type) sig := p.typeExpr(method.Type)
@ -896,7 +896,7 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym {
def.Name.SetUsed(true) def.Name.SetUsed(true)
pkg = def.Name.Pkg pkg = def.Name.Pkg
} }
return restrictlookup(expr.Sel.Value, pkg) return pkg.Lookup(expr.Sel.Value)
} }
panic(fmt.Sprintf("unexpected packname: %#v", expr)) panic(fmt.Sprintf("unexpected packname: %#v", expr))
} }
@ -911,7 +911,7 @@ func (p *noder) embedded(typ syntax.Expr) *Node {
} }
sym := p.packname(typ) sym := p.packname(typ)
n := p.nodSym(typ, ODCLFIELD, oldname(sym), lookup(sym.Name)) n := p.nodSym(typ, ODCLFIELD, importName(sym), lookup(sym.Name))
n.SetEmbedded(true) n.SetEmbedded(true)
if isStar { if isStar {
@ -1641,10 +1641,3 @@ func mkname(sym *types.Sym) *Node {
} }
return n return n
} }
func unparen(x *Node) *Node {
for x.Op == OPAREN {
x = x.Left
}
return x
}

View file

@ -502,6 +502,7 @@ func (o *Order) call(n *Node) {
x := o.copyExpr(arg.Left, arg.Left.Type, false) x := o.copyExpr(arg.Left, arg.Left.Type, false)
x.Name.SetKeepalive(true) x.Name.SetKeepalive(true)
arg.Left = x arg.Left = x
n.SetNeedsWrapper(true)
} }
} }
@ -927,7 +928,7 @@ func (o *Order) stmt(n *Node) {
n2.Ninit.Append(tmp2) n2.Ninit.Append(tmp2)
} }
r.Left = o.newTemp(r.Right.Left.Type.Elem(), types.Haspointers(r.Right.Left.Type.Elem())) r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers())
tmp2 := nod(OAS, tmp1, r.Left) tmp2 := nod(OAS, tmp1, r.Left)
tmp2 = typecheck(tmp2, ctxStmt) tmp2 = typecheck(tmp2, ctxStmt)
n2.Ninit.Append(tmp2) n2.Ninit.Append(tmp2)
@ -1406,7 +1407,7 @@ func (o *Order) as2(n *Node) {
left := []*Node{} left := []*Node{}
for ni, l := range n.List.Slice() { for ni, l := range n.List.Slice() {
if !l.isBlank() { if !l.isBlank() {
tmp := o.newTemp(l.Type, types.Haspointers(l.Type)) tmp := o.newTemp(l.Type, l.Type.HasPointers())
n.List.SetIndex(ni, tmp) n.List.SetIndex(ni, tmp)
tmplist = append(tmplist, tmp) tmplist = append(tmplist, tmp)
left = append(left, l) left = append(left, l)
@ -1428,7 +1429,7 @@ func (o *Order) okAs2(n *Node) {
var tmp1, tmp2 *Node var tmp1, tmp2 *Node
if !n.List.First().isBlank() { if !n.List.First().isBlank() {
typ := n.Right.Type typ := n.Right.Type
tmp1 = o.newTemp(typ, types.Haspointers(typ)) tmp1 = o.newTemp(typ, typ.HasPointers())
} }
if !n.List.Second().isBlank() { if !n.List.Second().isBlank() {

View file

@ -80,8 +80,8 @@ func cmpstackvarlt(a, b *Node) bool {
return a.Name.Used() return a.Name.Used()
} }
ap := types.Haspointers(a.Type) ap := a.Type.HasPointers()
bp := types.Haspointers(b.Type) bp := b.Type.HasPointers()
if ap != bp { if ap != bp {
return ap return ap
} }
@ -176,7 +176,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
} }
s.stksize += w s.stksize += w
s.stksize = Rnd(s.stksize, int64(n.Type.Align)) s.stksize = Rnd(s.stksize, int64(n.Type.Align))
if types.Haspointers(n.Type) { if n.Type.HasPointers() {
s.stkptrsize = s.stksize s.stkptrsize = s.stksize
lastHasPtr = true lastHasPtr = true
} else { } else {
@ -507,7 +507,7 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
if Ctxt.FixedFrameSize() == 0 { if Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr) offs -= int64(Widthptr)
} }
if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" { if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled // There is a word space for FP on ARM64 even if the frame pointer is disabled
offs -= int64(Widthptr) offs -= int64(Widthptr)
} }
@ -703,7 +703,7 @@ func stackOffset(slot ssa.LocalSlot) int32 {
if Ctxt.FixedFrameSize() == 0 { if Ctxt.FixedFrameSize() == 0 {
base -= int64(Widthptr) base -= int64(Widthptr)
} }
if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" { if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled // There is a word space for FP on ARM64 even if the frame pointer is disabled
base -= int64(Widthptr) base -= int64(Widthptr)
} }

View file

@ -20,7 +20,7 @@ func typeWithoutPointers() *types.Type {
func typeWithPointers() *types.Type { func typeWithPointers() *types.Type {
t := types.New(TSTRUCT) t := types.New(TSTRUCT)
f := &types.Field{Type: types.New(TPTR)} f := &types.Field{Type: types.NewPtr(types.New(TINT))}
t.SetFields([]*types.Field{f}) t.SetFields([]*types.Field{f})
return t return t
} }
@ -181,14 +181,6 @@ func TestStackvarSort(t *testing.T) {
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO), nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO), nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
} }
// haspointers updates Type.Haspointers as a side effect, so
// exercise this function on all inputs so that reflect.DeepEqual
// doesn't produce false positives.
for i := range want {
types.Haspointers(want[i].Type)
types.Haspointers(inp[i].Type)
}
sort.Sort(byStackVar(inp)) sort.Sort(byStackVar(inp))
if !reflect.DeepEqual(want, inp) { if !reflect.DeepEqual(want, inp) {
t.Error("sort failed") t.Error("sort failed")

View file

@ -259,7 +259,7 @@ func (v *varRegVec) AndNot(v1, v2 varRegVec) {
// nor do we care about empty structs (handled by the pointer check), // nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables. // nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n *Node) bool { func livenessShouldTrack(n *Node) bool {
return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && types.Haspointers(n.Type) return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
} }
// getvariables returns the list of on-stack variables that we need to track // getvariables returns the list of on-stack variables that we need to track
@ -436,7 +436,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
case ssa.LocalSlot: case ssa.LocalSlot:
return mask return mask
case *ssa.Register: case *ssa.Register:
if ptrOnly && !v.Type.HasHeapPointer() { if ptrOnly && !v.Type.HasPointers() {
return mask return mask
} }
regs[0] = loc regs[0] = loc
@ -451,7 +451,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
if loc1 == nil { if loc1 == nil {
continue continue
} }
if ptrOnly && !v.Type.FieldType(i).HasHeapPointer() { if ptrOnly && !v.Type.FieldType(i).HasPointers() {
continue continue
} }
regs[nreg] = loc1.(*ssa.Register) regs[nreg] = loc1.(*ssa.Register)
@ -568,13 +568,13 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 { if t.Align > 0 && off&int64(t.Align-1) != 0 {
Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
} }
if !t.HasPointers() {
// Note: this case ensures that pointers to go:notinheap types
// are not considered pointers by garbage collection and stack copying.
return
}
switch t.Etype { switch t.Etype {
case TINT8, TUINT8, TINT16, TUINT16,
TINT32, TUINT32, TINT64, TUINT64,
TINT, TUINT, TUINTPTR, TBOOL,
TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP: case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
if off&int64(Widthptr-1) != 0 { if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t) Fatalf("onebitwalktype1: invalid alignment, %v", t)

View file

@ -334,7 +334,7 @@ func walkrange(n *Node) *Node {
hv1 := temp(t.Elem()) hv1 := temp(t.Elem())
hv1.SetTypecheck(1) hv1.SetTypecheck(1)
if types.Haspointers(t.Elem()) { if t.Elem().HasPointers() {
init = append(init, nod(OAS, hv1, nil)) init = append(init, nod(OAS, hv1, nil))
} }
hb := temp(types.Types[TBOOL]) hb := temp(types.Types[TBOOL])
@ -586,7 +586,7 @@ func arrayClear(n, v1, v2, a *Node) bool {
n.Nbody.Append(nod(OAS, hn, tmp)) n.Nbody.Append(nod(OAS, hn, tmp))
var fn *Node var fn *Node
if a.Type.Elem().HasHeapPointer() { if a.Type.Elem().HasPointers() {
// memclrHasPointers(hp, hn) // memclrHasPointers(hp, hn)
Curfn.Func.setWBPos(stmt.Pos) Curfn.Func.setWBPos(stmt.Pos)
fn = mkcall("memclrHasPointers", nil, nil, hp, hn) fn = mkcall("memclrHasPointers", nil, nil, hp, hn)

View file

@ -119,7 +119,7 @@ func bmap(t *types.Type) *types.Type {
// the type of the overflow field to uintptr in this case. // the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in runtime/map.go. // See comment on hmap.overflow in runtime/map.go.
otyp := types.NewPtr(bucket) otyp := types.NewPtr(bucket)
if !types.Haspointers(elemtype) && !types.Haspointers(keytype) { if !elemtype.HasPointers() && !keytype.HasPointers() {
otyp = types.Types[TUINTPTR] otyp = types.Types[TUINTPTR]
} }
overflow := makefield("overflow", otyp) overflow := makefield("overflow", otyp)
@ -754,7 +754,7 @@ var kinds = []int{
// typeptrdata returns the length in bytes of the prefix of t // typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data. // containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 { func typeptrdata(t *types.Type) int64 {
if !types.Haspointers(t) { if !t.HasPointers() {
return 0 return 0
} }
@ -788,7 +788,7 @@ func typeptrdata(t *types.Type) int64 {
// Find the last field that has pointers. // Find the last field that has pointers.
var lastPtrField *types.Field var lastPtrField *types.Field
for _, t1 := range t.Fields().Slice() { for _, t1 := range t.Fields().Slice() {
if types.Haspointers(t1.Type) { if t1.Type.HasPointers() {
lastPtrField = t1 lastPtrField = t1
} }
} }
@ -1734,7 +1734,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) {
for i := range ptrmask { for i := range ptrmask {
ptrmask[i] = 0 ptrmask[i] = 0
} }
if !types.Haspointers(t) { if !t.HasPointers() {
return return
} }
@ -1803,7 +1803,7 @@ func (p *GCProg) end() {
func (p *GCProg) emit(t *types.Type, offset int64) { func (p *GCProg) emit(t *types.Type, offset int64) {
dowidth(t) dowidth(t)
if !types.Haspointers(t) { if !t.HasPointers() {
return return
} }
if t.Width == int64(Widthptr) { if t.Width == int64(Widthptr) {

View file

@ -251,10 +251,8 @@ func walkselectcases(cases *Nodes) []*Node {
r = typecheck(r, ctxStmt) r = typecheck(r, ctxStmt)
init = append(init, r) init = append(init, r)
// No initialization for order; runtime.selectgo is responsible for that.
order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas))) order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas)))
r = nod(OAS, order, nil)
r = typecheck(r, ctxStmt)
init = append(init, r)
var pc0, pcs *Node var pc0, pcs *Node
if flag_race { if flag_race {

View file

@ -295,7 +295,10 @@ func (s *state) emitOpenDeferInfo() {
// worker indicates which of the backend workers is doing the processing. // worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func { func buildssa(fn *Node, worker int) *ssa.Func {
name := fn.funcname() name := fn.funcname()
printssa := name == ssaDump printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
printssa = name == ssaDump || myimportpath+"."+name == ssaDump
}
var astBuf *bytes.Buffer var astBuf *bytes.Buffer
if printssa { if printssa {
astBuf = &bytes.Buffer{} astBuf = &bytes.Buffer{}
@ -329,8 +332,8 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.f.Config = ssaConfig s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker] s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset() s.f.Cache.Reset()
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
s.f.Name = name s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa s.f.PrintOrHtmlSSA = printssa
if fn.Func.Pragma&Nosplit != 0 { if fn.Func.Pragma&Nosplit != 0 {
s.f.NoSplit = true s.f.NoSplit = true
@ -2110,7 +2113,7 @@ func (s *state) expr(n *Node) *ssa.Value {
} }
// unsafe.Pointer <--> *T // unsafe.Pointer <--> *T
if to.Etype == TUNSAFEPTR && from.IsPtrShaped() || from.Etype == TUNSAFEPTR && to.IsPtrShaped() { if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
return v return v
} }
@ -4207,7 +4210,7 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false) s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false) addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
} }
if types.Haspointers(t) { if t.HasPointers() {
// Since we may use this argTemp during exit depending on the // Since we may use this argTemp during exit depending on the
// deferBits, we must define it unconditionally on entry. // deferBits, we must define it unconditionally on entry.
// Therefore, we must make sure it is zeroed out in the entry // Therefore, we must make sure it is zeroed out in the entry
@ -4309,12 +4312,12 @@ func (s *state) openDeferExit() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false) s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
} }
if r.rcvrNode != nil { if r.rcvrNode != nil {
if types.Haspointers(r.rcvrNode.Type) { if r.rcvrNode.Type.HasPointers() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false) s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
} }
} }
for _, argNode := range r.argNodes { for _, argNode := range r.argNodes {
if types.Haspointers(argNode.Type) { if argNode.Type.HasPointers() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false) s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
} }
} }
@ -4954,7 +4957,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) { func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
s.instrument(t, left, true) s.instrument(t, left, true)
if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) { if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type. // Known to not have write barrier. Store the whole type.
s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt) s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return return
@ -4966,7 +4969,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask,
// TODO: if the writebarrier pass knows how to reorder stores, // TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0. // we can do a single store here as long as skip==0.
s.storeTypeScalars(t, left, right, skip) s.storeTypeScalars(t, left, right, skip)
if skip&skipPtr == 0 && types.Haspointers(t) { if skip&skipPtr == 0 && t.HasPointers() {
s.storeTypePtrs(t, left, right) s.storeTypePtrs(t, left, right)
} }
} }
@ -5038,7 +5041,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
n := t.NumFields() n := t.NumFields()
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
ft := t.FieldType(i) ft := t.FieldType(i)
if !types.Haspointers(ft) { if !ft.HasPointers() {
continue continue
} }
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left) addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
@ -6179,7 +6182,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// Resolve branches, and relax DefaultStmt into NotStmt // Resolve branches, and relax DefaultStmt into NotStmt
for _, br := range s.Branches { for _, br := range s.Branches {
br.P.To.Val = s.bstart[br.B.ID] br.P.To.SetTarget(s.bstart[br.B.ID])
if br.P.Pos.IsStmt() != src.PosIsStmt { if br.P.Pos.IsStmt() != src.PosIsStmt {
br.P.Pos = br.P.Pos.WithNotStmt() br.P.Pos = br.P.Pos.WithNotStmt()
} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt { } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
@ -6863,6 +6866,10 @@ func (e *ssafn) SetWBPos(pos src.XPos) {
e.curfn.Func.setWBPos(pos) e.curfn.Func.setWBPos(pos)
} }
func (e *ssafn) MyImportPath() string {
return myimportpath
}
func (n *Node) Typ() *types.Type { func (n *Node) Typ() *types.Type {
return n.Type return n.Type
} }

View file

@ -271,13 +271,6 @@ func autolabel(prefix string) *types.Sym {
return lookupN(prefix, int(n)) return lookupN(prefix, int(n))
} }
func restrictlookup(name string, pkg *types.Pkg) *types.Sym {
if !types.IsExported(name) && pkg != localpkg {
yyerror("cannot refer to unexported name %s.%s", pkg.Name, name)
}
return pkg.Lookup(name)
}
// find all the exported symbols in package opkg // find all the exported symbols in package opkg
// and make them available in the current package // and make them available in the current package
func importdot(opkg *types.Pkg, pack *Node) { func importdot(opkg *types.Pkg, pack *Node) {
@ -788,12 +781,12 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
} }
// 8. src is a pointer or uintptr and dst is unsafe.Pointer. // 8. src is a pointer or uintptr and dst is unsafe.Pointer.
if (src.IsPtr() || src.Etype == TUINTPTR) && dst.Etype == TUNSAFEPTR { if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
return OCONVNOP return OCONVNOP
} }
// 9. src is unsafe.Pointer and dst is a pointer or uintptr. // 9. src is unsafe.Pointer and dst is a pointer or uintptr.
if src.Etype == TUNSAFEPTR && (dst.IsPtr() || dst.Etype == TUINTPTR) { if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
return OCONVNOP return OCONVNOP
} }
@ -1550,7 +1543,6 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
tfn.List.Set(structargs(method.Type.Params(), true)) tfn.List.Set(structargs(method.Type.Params(), true))
tfn.Rlist.Set(structargs(method.Type.Results(), false)) tfn.Rlist.Set(structargs(method.Type.Results(), false))
disableExport(newnam)
fn := dclfunc(newnam, tfn) fn := dclfunc(newnam, tfn)
fn.Func.SetDupok(true) fn.Func.SetDupok(true)
@ -1638,8 +1630,7 @@ func hashmem(t *types.Type) *Node {
sym := Runtimepkg.Lookup("memhash") sym := Runtimepkg.Lookup("memhash")
n := newname(sym) n := newname(sym)
n.SetClass(PFUNC) setNodeNameFunc(n)
n.Sym.SetFunc(true)
n.Type = functype(nil, []*Node{ n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)),
anonfield(types.Types[TUINTPTR]), anonfield(types.Types[TUINTPTR]),

View file

@ -141,19 +141,20 @@ const (
nodeInitorder, _ // tracks state during init1; two bits nodeInitorder, _ // tracks state during init1; two bits
_, _ // second nodeInitorder bit _, _ // second nodeInitorder bit
_, nodeHasBreak _, nodeHasBreak
_, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only _, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
_, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP; or ANDNOT lowered to OAND _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP; or ANDNOT lowered to OAND
_, nodeIsDDD // is the argument variadic _, nodeIsDDD // is the argument variadic
_, nodeDiag // already printed error about this _, nodeDiag // already printed error about this
_, nodeColas // OAS resulting from := _, nodeColas // OAS resulting from :=
_, nodeNonNil // guaranteed to be non-nil _, nodeNonNil // guaranteed to be non-nil
_, nodeTransient // storage can be reused immediately after this statement _, nodeTransient // storage can be reused immediately after this statement
_, nodeBounded // bounds check unnecessary _, nodeBounded // bounds check unnecessary
_, nodeHasCall // expression contains a function call _, nodeHasCall // expression contains a function call
_, nodeLikely // if statement condition likely _, nodeLikely // if statement condition likely
_, nodeHasVal // node.E contains a Val _, nodeHasVal // node.E contains a Val
_, nodeHasOpt // node.E contains an Opt _, nodeHasOpt // node.E contains an Opt
_, nodeEmbedded // ODCLFIELD embedded type _, nodeEmbedded // ODCLFIELD embedded type
_, nodeNeedsWrapper // OCALLxxx node that needs to be wrapped
) )
func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) } func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) }
@ -286,6 +287,20 @@ func (n *Node) SetIota(x int64) {
n.Xoffset = x n.Xoffset = x
} }
func (n *Node) NeedsWrapper() bool {
return n.flags&nodeNeedsWrapper != 0
}
// SetNeedsWrapper indicates that the OCALLxxx node needs to be wrapped by a closure.
func (n *Node) SetNeedsWrapper(b bool) {
switch n.Op {
case OCALLFUNC, OCALLMETH, OCALLINTER:
default:
Fatalf("Node.SetNeedsWrapper %v", n.Op)
}
n.flags.set(nodeNeedsWrapper, b)
}
// mayBeShared reports whether n may occur in multiple places in the AST. // mayBeShared reports whether n may occur in multiple places in the AST.
// Extra care must be taken when mutating such a node. // Extra care must be taken when mutating such a node.
func (n *Node) mayBeShared() bool { func (n *Node) mayBeShared() bool {

View file

@ -232,7 +232,11 @@ func walkstmt(n *Node) *Node {
n.Left = copyany(n.Left, &n.Ninit, true) n.Left = copyany(n.Left, &n.Ninit, true)
default: default:
n.Left = walkexpr(n.Left, &n.Ninit) if n.Left.NeedsWrapper() {
n.Left = wrapCall(n.Left, &n.Ninit)
} else {
n.Left = walkexpr(n.Left, &n.Ninit)
}
} }
case OFOR, OFORUNTIL: case OFOR, OFORUNTIL:
@ -381,9 +385,9 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
switch { switch {
case from.Size() == 2 && from.Align == 2: case from.Size() == 2 && from.Align == 2:
return "convT16", false return "convT16", false
case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from): case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
return "convT32", false return "convT32", false
case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from): case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers():
return "convT64", false return "convT64", false
} }
if sc := from.SoleComponent(); sc != nil { if sc := from.SoleComponent(); sc != nil {
@ -397,12 +401,12 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
switch tkind { switch tkind {
case 'E': case 'E':
if !types.Haspointers(from) { if !from.HasPointers() {
return "convT2Enoptr", true return "convT2Enoptr", true
} }
return "convT2E", true return "convT2E", true
case 'I': case 'I':
if !types.Haspointers(from) { if !from.HasPointers() {
return "convT2Inoptr", true return "convT2Inoptr", true
} }
return "convT2I", true return "convT2I", true
@ -954,11 +958,11 @@ opswitch:
case OCONV, OCONVNOP: case OCONV, OCONVNOP:
n.Left = walkexpr(n.Left, init) n.Left = walkexpr(n.Left, init)
if n.Op == OCONVNOP && checkPtr(Curfn, 1) { if n.Op == OCONVNOP && checkPtr(Curfn, 1) {
if n.Type.IsPtr() && n.Left.Type.Etype == TUNSAFEPTR { // unsafe.Pointer to *T if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T
n = walkCheckPtrAlignment(n, init, nil) n = walkCheckPtrAlignment(n, init, nil)
break break
} }
if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR { // uintptr to unsafe.Pointer if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer
n = walkCheckPtrArithmetic(n, init) n = walkCheckPtrArithmetic(n, init)
break break
} }
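For orientation, checkPtr(Curfn, 1) gates the -d=checkptr instrumentation, and the two branches above correspond roughly to the two user-level patterns it instruments. A hedged sketch in ordinary Go (the buffer and names are made up; exact diagnostics depend on the checkptr mode in use):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	buf := make([]byte, 16)
	b := &buf[1]

	// Pattern 1: unsafe.Pointer -> *T. Under -d=checkptr this conversion is
	// checked for alignment and allocation bounds (walkCheckPtrAlignment);
	// this one is deliberately misaligned for int64.
	p := (*int64)(unsafe.Pointer(b))
	_ = p

	// Pattern 2: uintptr -> unsafe.Pointer arithmetic. Under -d=checkptr the
	// result is checked to still point into the original object
	// (walkCheckPtrArithmetic).
	q := unsafe.Pointer(uintptr(unsafe.Pointer(&buf[0])) + 8)
	fmt.Println(*(*byte)(q))
}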
@ -1123,7 +1127,7 @@ opswitch:
n.List.SetSecond(walkexpr(n.List.Second(), init)) n.List.SetSecond(walkexpr(n.List.Second(), init))
case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.Etype == TUNSAFEPTR checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr()
if checkSlice { if checkSlice {
n.Left.Left = walkexpr(n.Left.Left, init) n.Left.Left = walkexpr(n.Left.Left, init)
} else { } else {
@ -1156,6 +1160,9 @@ opswitch:
} }
case ONEW: case ONEW:
if n.Type.Elem().NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", n.Type.Elem())
}
if n.Esc == EscNone { if n.Esc == EscNone {
if n.Type.Elem().Width >= maxImplicitStackVarSize { if n.Type.Elem().Width >= maxImplicitStackVarSize {
Fatalf("large ONEW with EscNone: %v", n) Fatalf("large ONEW with EscNone: %v", n)
@ -1324,6 +1331,9 @@ opswitch:
l = r l = r
} }
t := n.Type t := n.Type
if t.Elem().NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem())
}
if n.Esc == EscNone { if n.Esc == EscNone {
if !isSmallMakeSlice(n) { if !isSmallMakeSlice(n) {
Fatalf("non-small OMAKESLICE with EscNone: %v", n) Fatalf("non-small OMAKESLICE with EscNone: %v", n)
@ -1365,10 +1375,6 @@ opswitch:
// When len and cap can fit into int, use makeslice instead of // When len and cap can fit into int, use makeslice instead of
// makeslice64, which is faster and shorter on 32 bit platforms. // makeslice64, which is faster and shorter on 32 bit platforms.
if t.Elem().NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem())
}
len, cap := l, r len, cap := l, r
fnname := "makeslice64" fnname := "makeslice64"
@ -1403,14 +1409,14 @@ opswitch:
t := n.Type t := n.Type
if t.Elem().NotInHeap() { if t.Elem().NotInHeap() {
Fatalf("%v is go:notinheap; heap allocation disallowed", t.Elem()) yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem())
} }
length := conv(n.Left, types.Types[TINT]) length := conv(n.Left, types.Types[TINT])
copylen := nod(OLEN, n.Right, nil) copylen := nod(OLEN, n.Right, nil)
copyptr := nod(OSPTR, n.Right, nil) copyptr := nod(OSPTR, n.Right, nil)
if !types.Haspointers(t.Elem()) && n.Bounded() { if !t.Elem().HasPointers() && n.Bounded() {
// When len(to)==len(from) and elements have no pointers: // When len(to)==len(from) and elements have no pointers:
// replace make+copy with runtime.mallocgc+runtime.memmove. // replace make+copy with runtime.mallocgc+runtime.memmove.
@ -2012,9 +2018,6 @@ func walkprint(nn *Node, init *Nodes) *Node {
} }
func callnew(t *types.Type) *Node { func callnew(t *types.Type) *Node {
if t.NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", t)
}
dowidth(t) dowidth(t)
n := nod(ONEWOBJ, typename(t), nil) n := nod(ONEWOBJ, typename(t), nil)
n.Type = types.NewPtr(t) n.Type = types.NewPtr(t)
@ -2589,7 +2592,7 @@ func mapfast(t *types.Type) int {
} }
switch algtype(t.Key()) { switch algtype(t.Key()) {
case AMEM32: case AMEM32:
if !t.Key().HasHeapPointer() { if !t.Key().HasPointers() {
return mapfast32 return mapfast32
} }
if Widthptr == 4 { if Widthptr == 4 {
@ -2597,7 +2600,7 @@ func mapfast(t *types.Type) int {
} }
Fatalf("small pointer %v", t.Key()) Fatalf("small pointer %v", t.Key())
case AMEM64: case AMEM64:
if !t.Key().HasHeapPointer() { if !t.Key().HasPointers() {
return mapfast64 return mapfast64
} }
if Widthptr == 8 { if Widthptr == 8 {
@ -2744,7 +2747,7 @@ func appendslice(n *Node, init *Nodes) *Node {
nodes.Append(nod(OAS, s, nt)) nodes.Append(nod(OAS, s, nt))
var ncopy *Node var ncopy *Node
if elemtype.HasHeapPointer() { if elemtype.HasPointers() {
// copy(s[len(l1):], l2) // copy(s[len(l1):], l2)
nptr1 := nod(OSLICE, s, nil) nptr1 := nod(OSLICE, s, nil)
nptr1.Type = s.Type nptr1.Type = s.Type
@ -2865,7 +2868,7 @@ func isAppendOfMake(n *Node) bool {
// s = s[:n] // s = s[:n]
// lptr := &l1[0] // lptr := &l1[0]
// sptr := &s[0] // sptr := &s[0]
// if lptr == sptr || !hasPointers(T) { // if lptr == sptr || !T.HasPointers() {
// // growslice did not clear the whole underlying array (or did not get called) // // growslice did not clear the whole underlying array (or did not get called)
// hp := &s[len(l1)] // hp := &s[len(l1)]
// hn := l2 * sizeof(T) // hn := l2 * sizeof(T)
@ -2946,7 +2949,7 @@ func extendslice(n *Node, init *Nodes) *Node {
hn = conv(hn, types.Types[TUINTPTR]) hn = conv(hn, types.Types[TUINTPTR])
clrname := "memclrNoHeapPointers" clrname := "memclrNoHeapPointers"
hasPointers := types.Haspointers(elemtype) hasPointers := elemtype.HasPointers()
if hasPointers { if hasPointers {
clrname = "memclrHasPointers" clrname = "memclrHasPointers"
Curfn.Func.setWBPos(n.Pos) Curfn.Func.setWBPos(n.Pos)
@ -3082,7 +3085,7 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
// Also works if b is a string. // Also works if b is a string.
// //
func copyany(n *Node, init *Nodes, runtimecall bool) *Node { func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
if n.Left.Type.Elem().HasHeapPointer() { if n.Left.Type.Elem().HasPointers() {
Curfn.Func.setWBPos(n.Pos) Curfn.Func.setWBPos(n.Pos)
fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem()) fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
n.Left = cheapexpr(n.Left, init) n.Left = cheapexpr(n.Left, init)
@ -3167,8 +3170,7 @@ func eqfor(t *types.Type) (n *Node, needsize bool) {
case ASPECIAL: case ASPECIAL:
sym := typesymprefix(".eq", t) sym := typesymprefix(".eq", t)
n := newname(sym) n := newname(sym)
n.SetClass(PFUNC) setNodeNameFunc(n)
n.Sym.SetFunc(true)
n.Type = functype(nil, []*Node{ n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)), anonfield(types.NewPtr(t)),
@ -3859,6 +3861,14 @@ func candiscard(n *Node) bool {
// builtin(a1, a2, a3) // builtin(a1, a2, a3)
// }(x, y, z) // }(x, y, z)
// for print, println, and delete. // for print, println, and delete.
//
// Rewrite
// go f(x, y, uintptr(unsafe.Pointer(z)))
// into
// go func(a1, a2, a3) {
// builtin(a1, a2, uintptr(a3))
// }(x, y, unsafe.Pointer(z))
// for calls that contain unsafe-uintptr arguments.
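The comment above describes the whole transformation; here is the same rewrite written out as ordinary Go, as a hedged sketch. The callee f, its signature, and the variable names are invented, and the WaitGroup is only there so the example terminates; the point is that the unsafe.Pointer is passed to the wrapper as a pointer and only converted to uintptr inside it, so the pointed-to value stays reachable when the go'd (or deferred) call runs.

package main

import (
	"fmt"
	"sync"
	"unsafe"
)

// f stands in for a callee that takes a pointer smuggled as a uintptr.
func f(x, y int, p uintptr) {
	fmt.Printf("f(%d, %d, %#x)\n", x, y, p)
}

func main() {
	var wg sync.WaitGroup
	x, y, v := 1, 2, 3
	z := &v

	wg.Add(1)
	// Roughly what `go f(x, y, uintptr(unsafe.Pointer(z)))` now compiles to:
	go func(a1, a2 int, a3 unsafe.Pointer) {
		defer wg.Done()
		f(a1, a2, uintptr(a3))
	}(x, y, unsafe.Pointer(z))
	wg.Wait()
}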
var wrapCall_prgen int var wrapCall_prgen int
@ -3870,9 +3880,17 @@ func wrapCall(n *Node, init *Nodes) *Node {
init.AppendNodes(&n.Ninit) init.AppendNodes(&n.Ninit)
} }
isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
// origArgs records which arguments are uintptr(unsafe.Pointer(...)) conversions, so the conversion can be re-applied inside the wrapper.
origArgs := make([]*Node, n.List.Len())
t := nod(OTFUNC, nil, nil) t := nod(OTFUNC, nil, nil)
for i, arg := range n.List.Slice() { for i, arg := range n.List.Slice() {
s := lookupN("a", i) s := lookupN("a", i)
if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() {
origArgs[i] = arg
arg = arg.Left
n.List.SetIndex(i, arg)
}
t.List.Append(symfield(s, arg.Type)) t.List.Append(symfield(s, arg.Type))
} }
@ -3880,10 +3898,22 @@ func wrapCall(n *Node, init *Nodes) *Node {
sym := lookupN("wrap·", wrapCall_prgen) sym := lookupN("wrap·", wrapCall_prgen)
fn := dclfunc(sym, t) fn := dclfunc(sym, t)
a := nod(n.Op, nil, nil) args := paramNnames(t.Type)
a.List.Set(paramNnames(t.Type)) for i, origArg := range origArgs {
a = typecheck(a, ctxStmt) if origArg == nil {
fn.Nbody.Set1(a) continue
}
arg := nod(origArg.Op, args[i], nil)
arg.Type = origArg.Type
args[i] = arg
}
call := nod(n.Op, nil, nil)
if !isBuiltinCall {
call.Op = OCALL
call.Left = n.Left
}
call.List.Set(args)
fn.Nbody.Set1(call)
funcbody() funcbody()
@ -3891,12 +3921,12 @@ func wrapCall(n *Node, init *Nodes) *Node {
typecheckslice(fn.Nbody.Slice(), ctxStmt) typecheckslice(fn.Nbody.Slice(), ctxStmt)
xtop = append(xtop, fn) xtop = append(xtop, fn)
a = nod(OCALL, nil, nil) call = nod(OCALL, nil, nil)
a.Left = fn.Func.Nname call.Left = fn.Func.Nname
a.List.Set(n.List.Slice()) call.List.Set(n.List.Slice())
a = typecheck(a, ctxStmt) call = typecheck(call, ctxStmt)
a = walkexpr(a, init) call = walkexpr(call, init)
return a return call
} }
// substArgTypes substitutes the given list of types for // substArgTypes substitutes the given list of types for
@ -4011,7 +4041,7 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
walk(n.Left) walk(n.Left)
} }
case OCONVNOP: case OCONVNOP:
if n.Left.Type.Etype == TUNSAFEPTR { if n.Left.Type.IsUnsafePtr() {
n.Left = cheapexpr(n.Left, init) n.Left = cheapexpr(n.Left, init)
originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR])) originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR]))
} }

View file

@ -629,23 +629,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = r p.To.Reg = r
case ssa.OpPPC64MaskIfNotCarry:
r := v.Reg()
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpPPC64ADDconstForCarry:
r1 := v.Args[0].Reg()
p := s.Prog(v.Op.Asm())
p.Reg = r1
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP // Ignored; this is for the carry effect.
case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL, case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL,
ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW,
ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS, ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS,
@ -666,6 +649,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpPPC64SUBFCconst:
p := s.Prog(v.Op.Asm())
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt})
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpPPC64ANDCCconst: case ssa.OpPPC64ANDCCconst:
p := s.Prog(v.Op.Asm()) p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg() p.Reg = v.Args[0].Reg()
@ -1802,7 +1793,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString()) v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString())
case ssa.OpPPC64InvertFlags: case ssa.OpPPC64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT, ssa.OpPPC64FlagCarrySet, ssa.OpPPC64FlagCarryClear: case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpClobber: case ssa.OpClobber:
// TODO: implement for clobberdead experiment. Nop is ok for now. // TODO: implement for clobberdead experiment. Nop is ok for now.

View file

@ -338,8 +338,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
n.To.Reg = dividend n.To.Reg = dividend
} }
j.To.Val = n j.To.SetTarget(n)
j2.To.Val = s.Pc() j2.To.SetTarget(s.Pc())
} }
case ssa.OpS390XADDconst, ssa.OpS390XADDWconst: case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt) opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)

View file

@ -7,12 +7,14 @@ package ssa
// addressingModes combines address calculations into memory operations // addressingModes combines address calculations into memory operations
// that can perform complicated addressing modes. // that can perform complicated addressing modes.
func addressingModes(f *Func) { func addressingModes(f *Func) {
isInImmediateRange := is32Bit
switch f.Config.arch { switch f.Config.arch {
default: default:
// Most architectures can't do this. // Most architectures can't do this.
return return
case "amd64", "386": case "amd64", "386":
// TODO: s390x? case "s390x":
isInImmediateRange = is20Bit
} }
var tmp []*Value var tmp []*Value
@ -40,7 +42,7 @@ func addressingModes(f *Func) {
switch [2]auxType{opcodeTable[v.Op].auxType, opcodeTable[p.Op].auxType} { switch [2]auxType{opcodeTable[v.Op].auxType, opcodeTable[p.Op].auxType} {
case [2]auxType{auxSymOff, auxInt32}: case [2]auxType{auxSymOff, auxInt32}:
// TODO: introduce auxSymOff32 // TODO: introduce auxSymOff32
if !is32Bit(v.AuxInt + p.AuxInt) { if !isInImmediateRange(v.AuxInt + p.AuxInt) {
continue continue
} }
v.AuxInt += p.AuxInt v.AuxInt += p.AuxInt
@ -48,7 +50,7 @@ func addressingModes(f *Func) {
if v.Aux != nil && p.Aux != nil { if v.Aux != nil && p.Aux != nil {
continue continue
} }
if !is32Bit(v.AuxInt + p.AuxInt) { if !isInImmediateRange(v.AuxInt + p.AuxInt) {
continue continue
} }
if p.Aux != nil { if p.Aux != nil {
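The only architecture-specific knob in this pass is how large a folded displacement may be: amd64 and 386 accept a signed 32-bit offset, while the s390x long-displacement forms only accept a signed 20-bit offset. A hedged sketch of what the two helpers selected above are expected to check (the real definitions live with the other rewrite helpers in the ssa package; the bodies here just reflect those limits):

package main

// Sketches only; names match the helpers used above.
func is32Bit(n int64) bool {
	return n == int64(int32(n)) // signed 32-bit displacement (amd64, 386)
}

func is20Bit(n int64) bool {
	return -(1 << 19) <= n && n < 1<<19 // signed 20-bit displacement (s390x)
}

func main() {
	println(is32Bit(1<<31), is20Bit(1<<19)) // false false
}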
@ -398,4 +400,61 @@ var combine = map[[2]Op]Op{
[2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4, [2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4,
[2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4, [2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4,
[2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4, [2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4,
// s390x
[2]Op{OpS390XMOVDload, OpS390XADD}: OpS390XMOVDloadidx,
[2]Op{OpS390XMOVWload, OpS390XADD}: OpS390XMOVWloadidx,
[2]Op{OpS390XMOVHload, OpS390XADD}: OpS390XMOVHloadidx,
[2]Op{OpS390XMOVBload, OpS390XADD}: OpS390XMOVBloadidx,
[2]Op{OpS390XMOVWZload, OpS390XADD}: OpS390XMOVWZloadidx,
[2]Op{OpS390XMOVHZload, OpS390XADD}: OpS390XMOVHZloadidx,
[2]Op{OpS390XMOVBZload, OpS390XADD}: OpS390XMOVBZloadidx,
[2]Op{OpS390XMOVDBRload, OpS390XADD}: OpS390XMOVDBRloadidx,
[2]Op{OpS390XMOVWBRload, OpS390XADD}: OpS390XMOVWBRloadidx,
[2]Op{OpS390XMOVHBRload, OpS390XADD}: OpS390XMOVHBRloadidx,
[2]Op{OpS390XFMOVDload, OpS390XADD}: OpS390XFMOVDloadidx,
[2]Op{OpS390XFMOVSload, OpS390XADD}: OpS390XFMOVSloadidx,
[2]Op{OpS390XMOVDstore, OpS390XADD}: OpS390XMOVDstoreidx,
[2]Op{OpS390XMOVWstore, OpS390XADD}: OpS390XMOVWstoreidx,
[2]Op{OpS390XMOVHstore, OpS390XADD}: OpS390XMOVHstoreidx,
[2]Op{OpS390XMOVBstore, OpS390XADD}: OpS390XMOVBstoreidx,
[2]Op{OpS390XMOVDBRstore, OpS390XADD}: OpS390XMOVDBRstoreidx,
[2]Op{OpS390XMOVWBRstore, OpS390XADD}: OpS390XMOVWBRstoreidx,
[2]Op{OpS390XMOVHBRstore, OpS390XADD}: OpS390XMOVHBRstoreidx,
[2]Op{OpS390XFMOVDstore, OpS390XADD}: OpS390XFMOVDstoreidx,
[2]Op{OpS390XFMOVSstore, OpS390XADD}: OpS390XFMOVSstoreidx,
[2]Op{OpS390XMOVDload, OpS390XMOVDaddridx}: OpS390XMOVDloadidx,
[2]Op{OpS390XMOVWload, OpS390XMOVDaddridx}: OpS390XMOVWloadidx,
[2]Op{OpS390XMOVHload, OpS390XMOVDaddridx}: OpS390XMOVHloadidx,
[2]Op{OpS390XMOVBload, OpS390XMOVDaddridx}: OpS390XMOVBloadidx,
[2]Op{OpS390XMOVWZload, OpS390XMOVDaddridx}: OpS390XMOVWZloadidx,
[2]Op{OpS390XMOVHZload, OpS390XMOVDaddridx}: OpS390XMOVHZloadidx,
[2]Op{OpS390XMOVBZload, OpS390XMOVDaddridx}: OpS390XMOVBZloadidx,
[2]Op{OpS390XMOVDBRload, OpS390XMOVDaddridx}: OpS390XMOVDBRloadidx,
[2]Op{OpS390XMOVWBRload, OpS390XMOVDaddridx}: OpS390XMOVWBRloadidx,
[2]Op{OpS390XMOVHBRload, OpS390XMOVDaddridx}: OpS390XMOVHBRloadidx,
[2]Op{OpS390XFMOVDload, OpS390XMOVDaddridx}: OpS390XFMOVDloadidx,
[2]Op{OpS390XFMOVSload, OpS390XMOVDaddridx}: OpS390XFMOVSloadidx,
[2]Op{OpS390XMOVDstore, OpS390XMOVDaddridx}: OpS390XMOVDstoreidx,
[2]Op{OpS390XMOVWstore, OpS390XMOVDaddridx}: OpS390XMOVWstoreidx,
[2]Op{OpS390XMOVHstore, OpS390XMOVDaddridx}: OpS390XMOVHstoreidx,
[2]Op{OpS390XMOVBstore, OpS390XMOVDaddridx}: OpS390XMOVBstoreidx,
[2]Op{OpS390XMOVDBRstore, OpS390XMOVDaddridx}: OpS390XMOVDBRstoreidx,
[2]Op{OpS390XMOVWBRstore, OpS390XMOVDaddridx}: OpS390XMOVWBRstoreidx,
[2]Op{OpS390XMOVHBRstore, OpS390XMOVDaddridx}: OpS390XMOVHBRstoreidx,
[2]Op{OpS390XFMOVDstore, OpS390XMOVDaddridx}: OpS390XFMOVDstoreidx,
[2]Op{OpS390XFMOVSstore, OpS390XMOVDaddridx}: OpS390XFMOVSstoreidx,
} }

View file

@ -171,10 +171,10 @@ func checkFunc(f *Func) {
canHaveAuxInt = true canHaveAuxInt = true
canHaveAux = true canHaveAux = true
case auxCCop: case auxCCop:
if _, ok := v.Aux.(Op); !ok { if opcodeTable[Op(v.AuxInt)].name == "OpInvalid" {
f.Fatalf("bad type %T for CCop in %v", v.Aux, v) f.Fatalf("value %v has an AuxInt value that is a valid opcode", v)
} }
canHaveAux = true canHaveAuxInt = true
case auxS390XCCMask: case auxS390XCCMask:
if _, ok := v.Aux.(s390x.CCMask); !ok { if _, ok := v.Aux.(s390x.CCMask); !ok {
f.Fatalf("bad type %T for S390XCCMask in %v", v.Aux, v) f.Fatalf("bad type %T for S390XCCMask in %v", v.Aux, v)

View file

@ -173,6 +173,9 @@ type Frontend interface {
// SetWBPos indicates that a write barrier has been inserted // SetWBPos indicates that a write barrier has been inserted
// in this function at position pos. // in this function at position pos.
SetWBPos(pos src.XPos) SetWBPos(pos src.XPos)
// MyImportPath provides the import name (roughly, the package) for the function being compiled.
MyImportPath() string
} }
// interface used to hold a *gc.Node (a stack variable). // interface used to hold a *gc.Node (a stack variable).

View file

@ -23,9 +23,11 @@ func decomposeBuiltIn(f *Func) {
} }
// Decompose other values // Decompose other values
applyRewrite(f, rewriteBlockdec, rewriteValuedec) // Note: deadcode is false because we need to keep the original
// values around so the name component resolution below can still work.
applyRewrite(f, rewriteBlockdec, rewriteValuedec, leaveDeadValues)
if f.Config.RegSize == 4 { if f.Config.RegSize == 4 {
applyRewrite(f, rewriteBlockdec64, rewriteValuedec64) applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, leaveDeadValues)
} }
// Split up named values into their components. // Split up named values into their components.
@ -139,7 +141,7 @@ func decomposeStringPhi(v *Value) {
func decomposeSlicePhi(v *Value) { func decomposeSlicePhi(v *Value) {
types := &v.Block.Func.Config.Types types := &v.Block.Func.Config.Types
ptrType := types.BytePtr ptrType := v.Type.Elem().PtrTo()
lenType := types.Int lenType := types.Int
ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType) ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
@ -215,7 +217,7 @@ func decomposeInterfacePhi(v *Value) {
} }
func decomposeArgs(f *Func) { func decomposeArgs(f *Func) {
applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs) applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs, removeDeadValues)
} }
func decomposeUser(f *Func) { func decomposeUser(f *Func) {

View file

@ -146,6 +146,10 @@ func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t
func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) } func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
func (d DummyFrontend) Debug_checknil() bool { return false } func (d DummyFrontend) Debug_checknil() bool { return false }
func (d DummyFrontend) MyImportPath() string {
return "my/import/path"
}
var dummyTypes Types var dummyTypes Types
func init() { func init() {

View file

@ -257,6 +257,49 @@ func (f *Func) LogStat(key string, args ...interface{}) {
f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name) f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name)
} }
// unCacheLine removes v from f's constant cache "line" for aux,
// resets v.InCache when it is found (and removed),
// and returns whether v was found in that line.
func (f *Func) unCacheLine(v *Value, aux int64) bool {
vv := f.constants[aux]
for i, cv := range vv {
if v == cv {
vv[i] = vv[len(vv)-1]
vv[len(vv)-1] = nil
f.constants[aux] = vv[0 : len(vv)-1]
v.InCache = false
return true
}
}
return false
}
// unCache removes v from f's constant cache.
func (f *Func) unCache(v *Value) {
if v.InCache {
aux := v.AuxInt
if f.unCacheLine(v, aux) {
return
}
if aux == 0 {
switch v.Op {
case OpConstNil:
aux = constNilMagic
case OpConstSlice:
aux = constSliceMagic
case OpConstString:
aux = constEmptyStringMagic
case OpConstInterface:
aux = constInterfaceMagic
}
if aux != 0 && f.unCacheLine(v, aux) {
return
}
}
f.Fatalf("unCached value %s not found in cache, auxInt=0x%x, adjusted aux=0x%x", v.LongString(), v.AuxInt, aux)
}
}
// freeValue frees a value. It must no longer be referenced or have any args. // freeValue frees a value. It must no longer be referenced or have any args.
func (f *Func) freeValue(v *Value) { func (f *Func) freeValue(v *Value) {
if v.Block == nil { if v.Block == nil {
@ -270,19 +313,8 @@ func (f *Func) freeValue(v *Value) {
} }
// Clear everything but ID (which we reuse). // Clear everything but ID (which we reuse).
id := v.ID id := v.ID
if v.InCache {
// Values with zero arguments and OpOffPtr values might be cached, so remove them there. f.unCache(v)
nArgs := opcodeTable[v.Op].argLen
if nArgs == 0 || v.Op == OpOffPtr {
vv := f.constants[v.AuxInt]
for i, cv := range vv {
if v == cv {
vv[i] = vv[len(vv)-1]
vv[len(vv)-1] = nil
f.constants[v.AuxInt] = vv[0 : len(vv)-1]
break
}
}
} }
*v = Value{} *v = Value{}
v.ID = id v.ID = id
@ -548,6 +580,7 @@ func (f *Func) constVal(op Op, t *types.Type, c int64, setAuxInt bool) *Value {
v = f.Entry.NewValue0(src.NoXPos, op, t) v = f.Entry.NewValue0(src.NoXPos, op, t)
} }
f.constants[c] = append(vv, v) f.constants[c] = append(vv, v)
v.InCache = true
return v return v
} }
@ -678,7 +711,8 @@ func (f *Func) invalidateCFG() {
// GSHS_LOGFILE // GSHS_LOGFILE
// or standard out if that is empty or there is an error // or standard out if that is empty or there is an error
// opening the file. // opening the file.
func (f *Func) DebugHashMatch(evname, name string) bool { func (f *Func) DebugHashMatch(evname string) bool {
name := f.fe.MyImportPath() + "." + f.Name
evhash := os.Getenv(evname) evhash := os.Getenv(evname)
switch evhash { switch evhash {
case "": case "":
@ -727,7 +761,7 @@ func (f *Func) logDebugHashMatch(evname, name string) {
file = os.Stdout file = os.Stdout
if tmpfile := os.Getenv("GSHS_LOGFILE"); tmpfile != "" { if tmpfile := os.Getenv("GSHS_LOGFILE"); tmpfile != "" {
var err error var err error
file, err = os.Create(tmpfile) file, err = os.OpenFile(tmpfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil { if err != nil {
f.Fatalf("could not open hash-testing logfile %s", tmpfile) f.Fatalf("could not open hash-testing logfile %s", tmpfile)
} }

File diff suppressed because it is too large

View file

@ -1138,7 +1138,7 @@
// ((x>>8) | (x<<8)) -> (REV16 x), the type of x is uint16, "|" can also be "^" or "+". // ((x>>8) | (x<<8)) -> (REV16 x), the type of x is uint16, "|" can also be "^" or "+".
// UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by // UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by
// ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL. // ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL.
((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x) -> (REV16 x) ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x) ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x)
// use indexed loads and stores // use indexed loads and stores

File diff suppressed because it is too large

View file

@ -467,8 +467,8 @@ func init() {
// conditional instructions; auxint is // conditional instructions; auxint is
// one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.) // one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.)
{name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // aux(flags) ? arg0 : arg1 {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : arg1
{name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // aux(flags) ? arg0 : 0 {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0
// function calls // function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem

View file

@ -11,8 +11,8 @@
(Mul(64|32|16|8) x y) => (Select1 (MULVU x y)) (Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
(Mul(32|64)F ...) => (MUL(F|D) ...) (Mul(32|64)F ...) => (MUL(F|D) ...)
(Mul64uhilo ...) => (MULVU ...) (Mul64uhilo ...) => (MULVU ...)
(Select0 (Mul64uover x y)) -> (Select1 <typ.UInt64> (MULVU x y)) (Select0 (Mul64uover x y)) => (Select1 <typ.UInt64> (MULVU x y))
(Select1 (Mul64uover x y)) -> (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0])) (Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
(Hmul64 x y) => (Select0 (MULV x y)) (Hmul64 x y) => (Select0 (MULV x y))
(Hmul64u x y) => (Select0 (MULVU x y)) (Hmul64u x y) => (Select0 (MULVU x y))
@ -38,8 +38,8 @@
(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) (Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) (Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
// (x + y) / 2 with x>=y -> (x - y) / 2 + y // (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) -> (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y) (Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
(And(64|32|16|8) ...) => (AND ...) (And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...) (Or(64|32|16|8) ...) => (OR ...)
@ -130,10 +130,10 @@
(Not x) => (XORconst [1] x) (Not x) => (XORconst [1] x)
// constants // constants
(Const(64|32|16|8) ...) -> (MOVVconst ...) (Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F ...) -> (MOV(F|D)const ...) (Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0]) (ConstNil) => (MOVVconst [0])
(ConstBool ...) -> (MOVVconst ...) (ConstBool [b]) => (MOVVconst [int64(b2i(b))])
(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63]) (Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
@ -161,7 +161,7 @@
(SignExt16to64 ...) => (MOVHreg ...) (SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...) (SignExt32to64 ...) => (MOVWreg ...)
// float <-> int conversion // float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...) (Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...) (Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...) (Cvt64to32F ...) => (MOVVF ...)
@ -214,11 +214,11 @@
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) (Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y)) (Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))
(OffPtr [off] ptr:(SP)) -> (MOVVaddr [off] ptr) (OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) -> (ADDVconst [off] ptr) (OffPtr [off] ptr) => (ADDVconst [off] ptr)
(Addr ...) -> (MOVVaddr ...) (Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr {sym} base _) -> (MOVVaddr {sym} base) (LocalAddr {sym} base _) => (MOVVaddr {sym} base)
// loads // loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) (Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
@ -380,24 +380,17 @@
(InterCall ...) => (CALLinter ...) (InterCall ...) => (CALLinter ...)
// atomic intrinsics // atomic intrinsics
(AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...) (AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
(AtomicLoad32 ...) -> (LoweredAtomicLoad32 ...) (AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
(AtomicLoad64 ...) -> (LoweredAtomicLoad64 ...)
(AtomicLoadPtr ...) -> (LoweredAtomicLoad64 ...)
(AtomicStore8 ...) -> (LoweredAtomicStore8 ...) (AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
(AtomicStore32 ...) -> (LoweredAtomicStore32 ...) (AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
(AtomicStore64 ...) -> (LoweredAtomicStore64 ...)
(AtomicStorePtrNoWB ...) -> (LoweredAtomicStore64 ...)
(AtomicExchange32 ...) -> (LoweredAtomicExchange32 ...) (AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
(AtomicExchange64 ...) -> (LoweredAtomicExchange64 ...)
(AtomicAdd32 ...) -> (LoweredAtomicAdd32 ...) (AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
(AtomicAdd64 ...) -> (LoweredAtomicAdd64 ...)
(AtomicCompareAndSwap32 ...) -> (LoweredAtomicCas32 ...) (AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
(AtomicCompareAndSwap64 ...) -> (LoweredAtomicCas64 ...)
// checks // checks
(NilCheck ...) => (LoweredNilCheck ...) (NilCheck ...) => (LoweredNilCheck ...)
@ -444,69 +437,69 @@
(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) (EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
// fold offset into address // fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) -> (MOVVaddr [off1+off2] {sym} ptr) (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
// fold address into load/store // fold address into load/store
(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem) (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBUload [off1+off2] {sym} ptr mem) (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHload [off1+off2] {sym} ptr mem) (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHUload [off1+off2] {sym} ptr mem) (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem) (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWUload [off1+off2] {sym} ptr mem) (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVVload [off1+off2] {sym} ptr mem) (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem)
(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVFload [off1+off2] {sym} ptr mem) (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDload [off1+off2] {sym} ptr mem) (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem)
(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem) (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} ptr val mem) (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem) (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVVstore [off1+off2] {sym} ptr val mem) (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVFstore [off1+off2] {sym} ptr val mem) (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} ptr val mem) (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem) (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem) (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem) (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVVstorezero [off1+off2] {sym} ptr mem) (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVBload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVBUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVHload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVHUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVWload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVWUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVVload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVFload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVDload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (MOVBstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (MOVHstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (MOVWstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (MOVVstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (MOVFstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (MOVDstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVBstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVHstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVWstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOVVstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// store zero // store zero
(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
@ -643,10 +636,9 @@
(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))]) (MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))]) (MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg (MOVVconst [c])) => (MOVVconst [c]) (MOVVreg (MOVVconst [c])) => (MOVVconst [c])
(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero32 ptr mem) (LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
(LoweredAtomicStore64 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero64 ptr mem) (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst32 [c] ptr mem) (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)
(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst64 [c] ptr mem)
// constant comparisons // constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1]) (SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])

View file

@ -110,13 +110,21 @@
// Rotate generation with non-const shift // Rotate generation with non-const shift
// these match patterns from math/bits/RotateLeft[32|64], but there could be others // these match patterns from math/bits/RotateLeft[32|64], but there could be others
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y) (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y) ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y) (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y) (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y) ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y) (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
// Lowering rotates // Lowering rotates
(RotateLeft32 x y) => (ROTLW x y) (RotateLeft32 x y) => (ROTLW x y)
(RotateLeft64 x y) => (ROTL x y) (RotateLeft64 x y) => (ROTL x y)
@ -192,11 +200,15 @@
(Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst <typ.Int64> [63] y)) (Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) => (SRD x (ANDconst <typ.UInt> [63] y)) (Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) => (SRD x (ANDconst <typ.UInt> [63] y))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst <typ.Int64> [63] y)) (Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst <typ.Int64> [63] y))
(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) => (SRAD x (ANDconst <typ.UInt> [63] y)) (Rsh64x64 x (ANDconst <typ.UInt> [63] y)) => (SRAD x (ANDconst <typ.UInt> [63] y))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Lsh64x64 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) (Lsh64x64 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
(Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) (Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
@ -208,12 +220,16 @@
(Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst <typ.Int32> [31] y)) (Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst <typ.Int32> [31] y))
(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) => (SRW x (ANDconst <typ.UInt> [31] y)) (Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) => (SRW x (ANDconst <typ.UInt> [31] y))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst <typ.Int32> [31] y)) (Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst <typ.Int32> [31] y))
(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) => (SRAW x (ANDconst <typ.UInt> [31] y)) (Rsh32x64 x (ANDconst <typ.UInt> [31] y)) => (SRAW x (ANDconst <typ.UInt> [31] y))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) (Rsh32x64 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
(Rsh32Ux64 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) (Rsh32Ux64 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
@ -276,18 +292,11 @@
(Rsh8Ux8 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8])))) (Rsh8Ux8 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
(Lsh8x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8])))) (Lsh8x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
// Cleaning up shift ops when input is masked // Cleaning up shift ops
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && int64(c) + d < 0 => (MOVDconst [-1])
(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y) (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y)
(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y) (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y)
(ORN x (MOVDconst [-1])) => x (ORN x (MOVDconst [-1])) => x
(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && (c < 0 || int64(c) + d >= 0) => (FlagCarryClear)
(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && c >= 0 && int64(c) + d < 0 => (FlagCarrySet)
(MaskIfNotCarry (FlagCarrySet)) => (MOVDconst [0])
(MaskIfNotCarry (FlagCarryClear)) => (MOVDconst [-1])
(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x) (S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x) (S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
@ -306,8 +315,8 @@
(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x))) (Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
(Ctz8 x) => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x))) (Ctz8 x) => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
(BitLen64 x) => (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x)) (BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
(BitLen32 x) => (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x)) (BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
(PopCount64 ...) => (POPCNTD ...) (PopCount64 ...) => (POPCNTD ...)
(PopCount32 x) => (POPCNTW (MOVWZreg x)) (PopCount32 x) => (POPCNTW (MOVWZreg x))
@ -777,10 +786,19 @@
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x) (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
(ADDconst [0] x) => x (ADDconst [0] x) => x
(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x) (SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
// TODO deal with subtract-from-const
(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x) (ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
// Subtract from constant (with carry, but the carry is ignored).
// Note: these clobber the carry bit.
(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
(SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
(SUBFCconst [0] x) => (NEG x)
(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
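As an informal illustration (mine, not from the CL): the SUBFCconst rules above let a constant-minus-variable expression use the ppc64 subtract-from-immediate form directly instead of first materializing the constant with MOVDconst.

package main

import "fmt"

// fromConst subtracts its argument from a small constant; with the rules
// above this is a candidate for a single SUBFCconst [32] x on ppc64
// (illustration only, not taken from the CL).
func fromConst(x int64) int64 {
    return 32 - x
}

func main() {
    fmt.Println(fromConst(10)) // 22
}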
// Use register moves instead of stores and loads to move int<=>float values
// Common with math Float64bits, Float64frombits
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
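For context, a minimal sketch of the pattern the rule above targets; math.Float64bits and Float64frombits are the standard-library functions named in the comment, and the claim that this becomes a direct register move (MFVSRD) rather than a store plus load is taken from the rule itself.

package main

import (
    "fmt"
    "math"
)

func main() {
    f := 1.5
    bits := math.Float64bits(f)     // reinterpret the float64's bits as uint64
    g := math.Float64frombits(bits) // and back again
    fmt.Printf("%#x %v\n", bits, g) // 0x3ff8000000000000 1.5
}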
@ -175,6 +175,7 @@ func init() {
{name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1 {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1
{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1 {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
{name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1 {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1
{name: "SUBFCconst", argLength: 1, reg: gp11, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (with carry)
{name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1 {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1
{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1 {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1
@ -206,9 +207,7 @@ func init() {
{name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64 {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64
{name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32 {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
{name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry) {name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry)
{name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + auxint
{name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"}, // carry - 1 (if carry then 0 else -1)
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
@ -674,11 +673,9 @@ func init() {
// These ops are for temporary use by rewrite rules. They // These ops are for temporary use by rewrite rules. They
// cannot appear in the generated assembly. // cannot appear in the generated assembly.
{name: "FlagEQ"}, // equal {name: "FlagEQ"}, // equal
{name: "FlagLT"}, // signed < or unsigned < {name: "FlagLT"}, // signed < or unsigned <
{name: "FlagGT"}, // signed > or unsigned > {name: "FlagGT"}, // signed > or unsigned >
{name: "FlagCarrySet"}, // carry flag set
{name: "FlagCarryClear"}, // carry flag clear
} }
blocks := []blockData{ blocks := []blockData{
File diff suppressed because it is too large
@ -377,6 +377,10 @@
(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Ne y (I64Const [x]))
(I64Eq x (I64Const [0])) => (I64Eqz x)
(I64LtU (I64Const [0]) x) => (I64Eqz (I64Eqz x))
(I64LeU x (I64Const [0])) => (I64Eqz x)
(I64LtU x (I64Const [1])) => (I64Eqz x)
(I64LeU (I64Const [1]) x) => (I64Eqz (I64Eqz x))
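My reading of why these four rewrites are sound: for an unsigned x, x < 1 and x <= 0 hold exactly when x == 0, while 0 < x and 1 <= x hold exactly when x != 0, so each comparison reduces to I64Eqz (doubled to express "not zero"). A quick sanity check in Go:

package main

import "fmt"

func main() {
    for _, x := range []uint64{0, 1, 2, 1 << 63} {
        fmt.Println((x < 1) == (x == 0))  // x < 1  <=> x == 0
        fmt.Println((x <= 0) == (x == 0)) // x <= 0 <=> x == 0
        fmt.Println((0 < x) == (x != 0))  // 0 < x  <=> x != 0
        fmt.Println((1 <= x) == (x != 0)) // 1 <= x <=> x != 0
    }
}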
(I64Ne x (I64Const [0])) => (I64Eqz (I64Eqz x))
(I64Add x (I64Const [y])) => (I64AddConst [y] x)
@ -66,14 +66,14 @@
(Load <typ.Int>
(OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
mem))
(Store dst (SliceMake ptr len cap) mem) => (Store {t} dst (SliceMake ptr len cap) mem) =>
(Store {typ.Int}
(OffPtr <typ.IntPtr> [2*config.PtrSize] dst)
cap
(Store {typ.Int}
(OffPtr <typ.IntPtr> [config.PtrSize] dst)
len
(Store {typ.BytePtr} dst ptr mem))) (Store {t.Elem().PtrTo()} dst ptr mem)))
// interface ops
(ITab (IMake itab _)) => itab
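For orientation, a hedged sketch of the slice header whose store the rule above decomposes into three word-sized stores; the struct below is illustrative, not the compiler's internal representation. The substantive change in the rule is that the pointer word is now stored with the element pointer type (t.Elem().PtrTo()) rather than the generic typ.BytePtr.

package main

import (
    "fmt"
    "unsafe"
)

type sliceHeader struct {
    ptr unsafe.Pointer // word 0: element pointer, now typed as *elem in the rule
    len int            // word 1: stored at offset PtrSize
    cap int            // word 2: stored at offset 2*PtrSize
}

func main() {
    s := make([]int32, 2, 4)
    fmt.Println(unsafe.Sizeof(sliceHeader{}), len(s), cap(s)) // 24 2 4 on 64-bit
}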
@ -1807,6 +1807,8 @@
// invariant that pointers must stay within the pointed-to object, // invariant that pointers must stay within the pointed-to object,
// we can't pull part of a pointer computation above the AddPtr. // we can't pull part of a pointer computation above the AddPtr.
// See issue 37881. // See issue 37881.
// Note: we don't need to handle any (x-C) cases because we already rewrite
// (x-C) to (x+(-C)).
// x + (C + z) -> C + (x + z) // x + (C + z) -> C + (x + z)
(Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 <t> z x)) (Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 <t> z x))
@ -1820,23 +1822,29 @@
(Add16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> x z)) (Add16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> x z))
(Add8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> x z)) (Add8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> x z))
// x + (z - C) -> (x + z) - C
(Add64 (Sub64 z i:(Const64 <t>)) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 <t> x z) i)
(Add32 (Sub32 z i:(Const32 <t>)) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 <t> x z) i)
(Add16 (Sub16 z i:(Const16 <t>)) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 <t> x z) i)
(Add8 (Sub8 z i:(Const8 <t>)) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Add8 <t> x z) i)
// x - (C - z) -> x + (z - C) -> (x + z) - C // x - (C - z) -> x + (z - C) -> (x + z) - C
(Sub64 x (Sub64 i:(Const64 <t>) z)) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 <t> x z) i) (Sub64 x (Sub64 i:(Const64 <t>) z)) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 <t> x z) i)
(Sub32 x (Sub32 i:(Const32 <t>) z)) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 <t> x z) i) (Sub32 x (Sub32 i:(Const32 <t>) z)) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 <t> x z) i)
(Sub16 x (Sub16 i:(Const16 <t>) z)) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 <t> x z) i) (Sub16 x (Sub16 i:(Const16 <t>) z)) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 <t> x z) i)
(Sub8 x (Sub8 i:(Const8 <t>) z)) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Add8 <t> x z) i) (Sub8 x (Sub8 i:(Const8 <t>) z)) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Add8 <t> x z) i)
// x - (z - C) -> x + (C - z) -> (x - z) + C // x - (z + C) -> x + (-z - C) -> (x - z) - C
(Sub64 x (Sub64 z i:(Const64 <t>))) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> x z)) (Sub64 x (Add64 z i:(Const64 <t>))) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Sub64 <t> x z) i)
(Sub32 x (Sub32 z i:(Const32 <t>))) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> x z)) (Sub32 x (Add32 z i:(Const32 <t>))) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Sub32 <t> x z) i)
(Sub16 x (Sub16 z i:(Const16 <t>))) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> x z)) (Sub16 x (Add16 z i:(Const16 <t>))) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Sub16 <t> x z) i)
(Sub8 x (Sub8 z i:(Const8 <t>))) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> x z)) (Sub8 x (Add8 z i:(Const8 <t>))) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Sub8 <t> x z) i)
// (C - z) - x -> C - (z + x)
(Sub64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 i (Add64 <t> z x))
(Sub32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 i (Add32 <t> z x))
(Sub16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 i (Add16 <t> z x))
(Sub8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 i (Add8 <t> z x))
// (z + C) -x -> C + (z - x)
(Sub64 (Add64 z i:(Const64 <t>)) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> z x))
(Sub32 (Add32 z i:(Const32 <t>)) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> z x))
(Sub16 (Add16 z i:(Const16 <t>)) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> z x))
(Sub8 (Add8 z i:(Const8 <t>)) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> z x))
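A worked example of what this reassociation buys (mine, not from the CL): pushing the constant to the outside lets it meet and fold with other constants in the surrounding expression.

package main

import "fmt"

// f is a candidate for the rules above: x - (y + 3) becomes (x - y) - 3,
// and the trailing + 5 can then fold with the -3 into a single +2
// (illustration only; the exact rewrite sequence is up to the compiler).
func f(x, y int64) int64 {
    return (x - (y + 3)) + 5
}

func main() {
    fmt.Println(f(10, 4)) // 8, same as x - y + 2
}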
// x & (C & z) -> C & (x & z)
(And64 (And64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (And64 i (And64 <t> z x))
@ -1856,6 +1864,12 @@
(Xor16 (Xor16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Xor16 i (Xor16 <t> z x)) (Xor16 (Xor16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Xor16 i (Xor16 <t> z x))
(Xor8 (Xor8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Xor8 i (Xor8 <t> z x)) (Xor8 (Xor8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Xor8 i (Xor8 <t> z x))
// x * (D * z) = D * (x * z)
(Mul64 (Mul64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Mul64 i (Mul64 <t> x z))
(Mul32 (Mul32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Mul32 i (Mul32 <t> x z))
(Mul16 (Mul16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Mul16 i (Mul16 <t> x z))
(Mul8 (Mul8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Mul8 i (Mul8 <t> x z))
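The multiplication rule plays the same role; an illustrative sketch, assuming the usual constant-folding rules run afterwards:

package main

import "fmt"

// scale is a candidate for the Mul reassociation above: (3*y)*x is
// rearranged to 3*(x*y), so the outer *5 can fold the constants into
// 15*(x*y) (illustration only).
func scale(x, y int64) int64 {
    return ((3 * y) * x) * 5
}

func main() {
    fmt.Println(scale(2, 1)) // 30
}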
// C + (D + x) -> (C + D) + x
(Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c+d]) x)
(Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c+d]) x)
@ -1868,24 +1882,18 @@
(Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c+d]) x) (Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c+d]) x)
(Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c+d]) x) (Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c+d]) x)
// C + (x - D) -> (C - D) + x
(Add64 (Const64 <t> [c]) (Sub64 x (Const64 <t> [d]))) => (Add64 (Const64 <t> [c-d]) x)
(Add32 (Const32 <t> [c]) (Sub32 x (Const32 <t> [d]))) => (Add32 (Const32 <t> [c-d]) x)
(Add16 (Const16 <t> [c]) (Sub16 x (Const16 <t> [d]))) => (Add16 (Const16 <t> [c-d]) x)
(Add8 (Const8 <t> [c]) (Sub8 x (Const8 <t> [d]))) => (Add8 (Const8 <t> [c-d]) x)
// C - (x - D) -> (C + D) - x
(Sub64 (Const64 <t> [c]) (Sub64 x (Const64 <t> [d]))) => (Sub64 (Const64 <t> [c+d]) x)
(Sub32 (Const32 <t> [c]) (Sub32 x (Const32 <t> [d]))) => (Sub32 (Const32 <t> [c+d]) x)
(Sub16 (Const16 <t> [c]) (Sub16 x (Const16 <t> [d]))) => (Sub16 (Const16 <t> [c+d]) x)
(Sub8 (Const8 <t> [c]) (Sub8 x (Const8 <t> [d]))) => (Sub8 (Const8 <t> [c+d]) x)
// C - (D - x) -> (C - D) + x // C - (D - x) -> (C - D) + x
(Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c-d]) x) (Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c-d]) x)
(Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c-d]) x) (Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c-d]) x)
(Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c-d]) x) (Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c-d]) x)
(Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c-d]) x) (Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c-d]) x)
// C - (D + x) -> (C - D) - x
(Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c-d]) x)
(Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c-d]) x)
(Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c-d]) x)
(Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c-d]) x)
// C & (D & x) -> (C & D) & x // C & (D & x) -> (C & D) & x
(And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x)) => (And64 (Const64 <t> [c&d]) x) (And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x)) => (And64 (Const64 <t> [c&d]) x)
(And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x)) => (And32 (Const32 <t> [c&d]) x) (And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x)) => (And32 (Const32 <t> [c&d]) x)
@ -1423,7 +1423,8 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxi
func opHasAuxInt(op opData) bool { func opHasAuxInt(op opData) bool {
switch op.aux { switch op.aux {
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant": case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64",
"SymOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop":
return true return true
} }
return false return false
@ -1431,7 +1432,7 @@ func opHasAuxInt(op opData) bool {
func opHasAux(op opData) bool { func opHasAux(op opData) bool {
switch op.aux { switch op.aux {
case "String", "Sym", "SymOff", "SymValAndOff", "Typ", "TypSize", "CCop", case "String", "Sym", "SymOff", "SymValAndOff", "Typ", "TypSize",
"S390XCCMask", "S390XRotateParams": "S390XCCMask", "S390XRotateParams":
return true return true
} }
@ -1784,8 +1785,6 @@ func (op opData) auxType() string {
return "s390x.CCMask" return "s390x.CCMask"
case "S390XRotateParams": case "S390XRotateParams":
return "s390x.RotateParams" return "s390x.RotateParams"
case "CCop":
return "CCop"
default: default:
return "invalid" return "invalid"
} }
@ -1820,6 +1819,8 @@ func (op opData) auxIntType() string {
return "Op" return "Op"
case "FlagConstant": case "FlagConstant":
return "flagConstant" return "flagConstant"
case "ARM64BitField":
return "arm64BitField"
default: default:
return "invalid" return "invalid"
} }
@ -7,7 +7,7 @@ package ssa
// convert to machine-dependent ops // convert to machine-dependent ops
func lower(f *Func) { func lower(f *Func) {
// repeat rewrites until we find no more rewrites // repeat rewrites until we find no more rewrites
applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue) applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue, removeDeadValues)
} }
// checkLower checks for unlowered opcodes and fails if we find one. // checkLower checks for unlowered opcodes and fails if we find one.
@ -235,7 +235,7 @@ func nilcheckelim2(f *Func) {
continue continue
} }
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasHeapPointer()) { if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) {
// These ops don't really change memory. // These ops don't really change memory.
continue continue
// Note: OpVarDef requires that the defined variable not have pointers. // Note: OpVarDef requires that the defined variable not have pointers.
@ -277,3 +277,9 @@ func boundsABI(b int64) int {
panic("bad BoundsKind") panic("bad BoundsKind")
} }
} }
// arm64BitField is the Go type of the ARM64BitField auxInt.
// If x is an ARM64BitField, then width = x&0xff and lsb = (x>>8)&0xff,
// with width+lsb < 64 for the 64-bit variant and width+lsb < 32 for the 32-bit variant.
// The meaning of width and lsb is instruction-dependent.
type arm64BitField int16
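For reference, the width|lsb<<8 packing described above can be exercised in isolation. This stand-alone sketch mirrors the compiler's armBFAuxInt, getARM64BFlsb and getARM64BFwidth helpers rather than reusing them.

package main

import "fmt"

// bitField packs width into the low byte and lsb into the next byte,
// following the encoding described in the comment above.
type bitField int16

func pack(lsb, width int64) bitField { return bitField(width | lsb<<8) }

func (bf bitField) lsb() int64   { return int64(uint64(bf) >> 8) }
func (bf bitField) width() int64 { return int64(bf) & 0xff }

func main() {
    bf := pack(8, 8)
    fmt.Println(bf.lsb(), bf.width()) // 8 8
}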
@ -1828,6 +1828,7 @@ const (
OpPPC64FADD OpPPC64FADD
OpPPC64FADDS OpPPC64FADDS
OpPPC64SUB OpPPC64SUB
OpPPC64SUBFCconst
OpPPC64FSUB OpPPC64FSUB
OpPPC64FSUBS OpPPC64FSUBS
OpPPC64MULLD OpPPC64MULLD
@ -1853,8 +1854,6 @@ const (
OpPPC64ROTL OpPPC64ROTL
OpPPC64ROTLW OpPPC64ROTLW
OpPPC64LoweredAdd64Carry OpPPC64LoweredAdd64Carry
OpPPC64ADDconstForCarry
OpPPC64MaskIfNotCarry
OpPPC64SRADconst OpPPC64SRADconst
OpPPC64SRAWconst OpPPC64SRAWconst
OpPPC64SRDconst OpPPC64SRDconst
@ -2027,8 +2026,6 @@ const (
OpPPC64FlagEQ OpPPC64FlagEQ
OpPPC64FlagLT OpPPC64FlagLT
OpPPC64FlagGT OpPPC64FlagGT
OpPPC64FlagCarrySet
OpPPC64FlagCarryClear
OpRISCV64ADD OpRISCV64ADD
OpRISCV64ADDI OpRISCV64ADDI
@ -24317,6 +24314,20 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "SUBFCconst",
auxType: auxInt64,
argLen: 1,
asm: ppc64.ASUBC,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{ {
name: "FSUB", name: "FSUB",
argLen: 2, argLen: 2,
@ -24683,28 +24694,6 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "ADDconstForCarry",
auxType: auxInt16,
argLen: 1,
asm: ppc64.AADDC,
reg: regInfo{
inputs: []inputInfo{
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
clobbers: 2147483648, // R31
},
},
{
name: "MaskIfNotCarry",
argLen: 1,
asm: ppc64.AADDME,
reg: regInfo{
outputs: []outputInfo{
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
},
},
{ {
name: "SRADconst", name: "SRADconst",
auxType: auxInt64, auxType: auxInt64,
@ -26964,16 +26953,6 @@ var opcodeTable = [...]opInfo{
argLen: 0, argLen: 0,
reg: regInfo{}, reg: regInfo{},
}, },
{
name: "FlagCarrySet",
argLen: 0,
reg: regInfo{},
},
{
name: "FlagCarryClear",
argLen: 0,
reg: regInfo{},
},
{ {
name: "ADD", name: "ADD",
@ -6,5 +6,5 @@ package ssa
// machine-independent optimization // machine-independent optimization
func opt(f *Func) { func opt(f *Func) {
applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric) applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric, removeDeadValues)
} }
@ -588,7 +588,7 @@ func (s *regAllocState) init(f *Func) {
if s.f.Config.hasGReg { if s.f.Config.hasGReg {
s.allocatable &^= 1 << s.GReg s.allocatable &^= 1 << s.GReg
} }
if s.f.Config.ctxt.Framepointer_enabled && s.f.Config.FPReg >= 0 { if objabi.Framepointer_enabled && s.f.Config.FPReg >= 0 {
s.allocatable &^= 1 << uint(s.f.Config.FPReg) s.allocatable &^= 1 << uint(s.f.Config.FPReg)
} }
if s.f.Config.LinkReg != -1 { if s.f.Config.LinkReg != -1 {

"path/filepath" "path/filepath"
) )
func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) { type deadValueChoice bool
const (
leaveDeadValues deadValueChoice = false
removeDeadValues = true
)
// deadcode indicates whether rewrite should try to remove any values that become dead.
func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValueChoice) {
// repeat rewrites until we find no more rewrites // repeat rewrites until we find no more rewrites
pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
pendingLines.clear() pendingLines.clear()
@ -56,6 +64,18 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) {
*v0 = *v *v0 = *v
v0.Args = append([]*Value{}, v.Args...) // make a new copy, not aliasing v0.Args = append([]*Value{}, v.Args...) // make a new copy, not aliasing
} }
if v.Uses == 0 && v.removeable() {
if v.Op != OpInvalid && deadcode == removeDeadValues {
// Reset any values that are now unused, so that we decrement
// the use count of all of its arguments.
// Not quite a deadcode pass, because it does not handle cycles.
// But it should help Uses==1 rules to fire.
v.reset(OpInvalid)
change = true
}
// No point rewriting values which aren't used.
continue
}
vchange := phielimValue(v) vchange := phielimValue(v)
if vchange && debug > 1 { if vchange && debug > 1 {
@ -618,6 +638,9 @@ func auxIntToFloat64(i int64) float64 {
func auxIntToValAndOff(i int64) ValAndOff { func auxIntToValAndOff(i int64) ValAndOff {
return ValAndOff(i) return ValAndOff(i)
} }
func auxIntToArm64BitField(i int64) arm64BitField {
return arm64BitField(i)
}
func auxIntToInt128(x int64) int128 { func auxIntToInt128(x int64) int128 {
if x != 0 { if x != 0 {
panic("nonzero int128 not allowed") panic("nonzero int128 not allowed")
@ -628,6 +651,10 @@ func auxIntToFlagConstant(x int64) flagConstant {
return flagConstant(x) return flagConstant(x)
} }
func auxIntToOp(cc int64) Op {
return Op(cc)
}
func boolToAuxInt(b bool) int64 { func boolToAuxInt(b bool) int64 {
if b { if b {
return 1 return 1
@ -658,6 +685,9 @@ func float64ToAuxInt(f float64) int64 {
func valAndOffToAuxInt(v ValAndOff) int64 { func valAndOffToAuxInt(v ValAndOff) int64 {
return int64(v) return int64(v)
} }
func arm64BitFieldToAuxInt(v arm64BitField) int64 {
return int64(v)
}
func int128ToAuxInt(x int128) int64 { func int128ToAuxInt(x int128) int64 {
if x != 0 { if x != 0 {
panic("nonzero int128 not allowed") panic("nonzero int128 not allowed")
@ -668,6 +698,10 @@ func flagConstantToAuxInt(x flagConstant) int64 {
return int64(x) return int64(x)
} }
func opToAuxInt(o Op) int64 {
return int64(o)
}
func auxToString(i interface{}) string { func auxToString(i interface{}) string {
return i.(string) return i.(string)
} }
@ -701,9 +735,6 @@ func s390xCCMaskToAux(c s390x.CCMask) interface{} {
func s390xRotateParamsToAux(r s390x.RotateParams) interface{} { func s390xRotateParamsToAux(r s390x.RotateParams) interface{} {
return r return r
} }
func cCopToAux(o Op) interface{} {
return o
}
// uaddOvf reports whether unsigned a+b would overflow. // uaddOvf reports whether unsigned a+b would overflow.
func uaddOvf(a, b int64) bool { func uaddOvf(a, b int64) bool {
@ -1008,8 +1039,7 @@ func arm64Invert(op Op) Op {
// evaluate an ARM64 op against a flags value // evaluate an ARM64 op against a flags value
// that is potentially constant; return 1 for true, // that is potentially constant; return 1 for true,
// -1 for false, and 0 for not constant. // -1 for false, and 0 for not constant.
func ccARM64Eval(cc interface{}, flags *Value) int { func ccARM64Eval(op Op, flags *Value) int {
op := cc.(Op)
fop := flags.Op fop := flags.Op
if fop == OpARM64InvertFlags { if fop == OpARM64InvertFlags {
return -ccARM64Eval(op, flags.Args[0]) return -ccARM64Eval(op, flags.Args[0])
@ -1292,24 +1322,24 @@ func hasSmallRotate(c *Config) bool {
} }
// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format. // encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
func armBFAuxInt(lsb, width int64) int64 { func armBFAuxInt(lsb, width int64) arm64BitField {
if lsb < 0 || lsb > 63 { if lsb < 0 || lsb > 63 {
panic("ARM(64) bit field lsb constant out of range") panic("ARM(64) bit field lsb constant out of range")
} }
if width < 1 || width > 64 { if width < 1 || width > 64 {
panic("ARM(64) bit field width constant out of range") panic("ARM(64) bit field width constant out of range")
} }
return width | lsb<<8 return arm64BitField(width | lsb<<8)
} }
// returns the lsb part of the auxInt field of arm64 bitfield ops. // returns the lsb part of the auxInt field of arm64 bitfield ops.
func getARM64BFlsb(bfc int64) int64 { func (bfc arm64BitField) getARM64BFlsb() int64 {
return int64(uint64(bfc) >> 8) return int64(uint64(bfc) >> 8)
} }
// returns the width part of the auxInt field of arm64 bitfield ops. // returns the width part of the auxInt field of arm64 bitfield ops.
func getARM64BFwidth(bfc int64) int64 { func (bfc arm64BitField) getARM64BFwidth() int64 {
return bfc & 0xff return int64(bfc) & 0xff
} }
// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask. // checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
File diff suppressed because it is too large
@ -2069,10 +2069,10 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool {
v.AddArg(x) v.AddArg(x)
return true return true
} }
// match: (ADDshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x) // match: (ADDshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
// result: (REV16 x) // result: (REV16 x)
for { for {
if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
break break
} }
x := v_0.Args[0] x := v_0.Args[0]
@ -8537,10 +8537,10 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
v.AddArg(x) v.AddArg(x)
return true return true
} }
// match: (ORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x) // match: (ORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
// result: (REV16 x) // result: (REV16 x)
for { for {
if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
break break
} }
x := v_0.Args[0] x := v_0.Args[0]
@ -12576,10 +12576,10 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
v.AddArg(x) v.AddArg(x)
return true return true
} }
// match: (XORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x) // match: (XORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
// result: (REV16 x) // result: (REV16 x)
for { for {
if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
break
}
x := v_0.Args[0]
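As an aside, the BFXU-plus-shift pattern these matches recognize computes a 16-bit byte swap, which is why the result is REV16; in plain Go the same computation is simply:

package main

import (
    "fmt"
    "math/bits"
)

// swap16 is what the BFXU(8,8)/shift-left-8 combination computes for a
// uint16 value; the compiler recognizes this idiom as a REV16.
func swap16(x uint16) uint16 {
    return x>>8 | x<<8
}

func main() {
    fmt.Printf("%#04x %#04x\n", swap16(0x1234), bits.ReverseBytes16(0x1234)) // 0x3412 0x3412
}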
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -428,8 +428,6 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64ADD(v) return rewriteValuePPC64_OpPPC64ADD(v)
case OpPPC64ADDconst: case OpPPC64ADDconst:
return rewriteValuePPC64_OpPPC64ADDconst(v) return rewriteValuePPC64_OpPPC64ADDconst(v)
case OpPPC64ADDconstForCarry:
return rewriteValuePPC64_OpPPC64ADDconstForCarry(v)
case OpPPC64AND: case OpPPC64AND:
return rewriteValuePPC64_OpPPC64AND(v) return rewriteValuePPC64_OpPPC64AND(v)
case OpPPC64ANDN: case OpPPC64ANDN:
@ -570,8 +568,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64MOVWstorezero(v) return rewriteValuePPC64_OpPPC64MOVWstorezero(v)
case OpPPC64MTVSRD: case OpPPC64MTVSRD:
return rewriteValuePPC64_OpPPC64MTVSRD(v) return rewriteValuePPC64_OpPPC64MTVSRD(v)
case OpPPC64MaskIfNotCarry: case OpPPC64NEG:
return rewriteValuePPC64_OpPPC64MaskIfNotCarry(v) return rewriteValuePPC64_OpPPC64NEG(v)
case OpPPC64NOR: case OpPPC64NOR:
return rewriteValuePPC64_OpPPC64NOR(v) return rewriteValuePPC64_OpPPC64NOR(v)
case OpPPC64NotEqual: case OpPPC64NotEqual:
@ -600,6 +598,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64SRW(v) return rewriteValuePPC64_OpPPC64SRW(v)
case OpPPC64SUB: case OpPPC64SUB:
return rewriteValuePPC64_OpPPC64SUB(v) return rewriteValuePPC64_OpPPC64SUB(v)
case OpPPC64SUBFCconst:
return rewriteValuePPC64_OpPPC64SUBFCconst(v)
case OpPPC64XOR: case OpPPC64XOR:
return rewriteValuePPC64_OpPPC64XOR(v) return rewriteValuePPC64_OpPPC64XOR(v)
case OpPPC64XORconst: case OpPPC64XORconst:
@ -1025,15 +1025,14 @@ func rewriteValuePPC64_OpBitLen32(v *Value) bool {
b := v.Block b := v.Block
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
// match: (BitLen32 x) // match: (BitLen32 x)
// result: (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x)) // result: (SUBFCconst [32] (CNTLZW <typ.Int> x))
for { for {
x := v_0 x := v_0
v.reset(OpPPC64SUB) v.reset(OpPPC64SUBFCconst)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v.AuxInt = int64ToAuxInt(32)
v0.AuxInt = int64ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int)
v1 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int) v0.AddArg(x)
v1.AddArg(x) v.AddArg(v0)
v.AddArg2(v0, v1)
return true return true
} }
} }
@ -1042,15 +1041,14 @@ func rewriteValuePPC64_OpBitLen64(v *Value) bool {
b := v.Block b := v.Block
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
// match: (BitLen64 x) // match: (BitLen64 x)
// result: (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x)) // result: (SUBFCconst [64] (CNTLZD <typ.Int> x))
for { for {
x := v_0 x := v_0
v.reset(OpPPC64SUB) v.reset(OpPPC64SUBFCconst)
v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v.AuxInt = int64ToAuxInt(64)
v0.AuxInt = int64ToAuxInt(64) v0 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int)
v1 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int) v0.AddArg(x)
v1.AddArg(x) v.AddArg(v0)
v.AddArg2(v0, v1)
return true
}
}
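The identity behind both BitLen lowerings is Len(x) = width - LeadingZeros(x), which is exactly what a single SUBFCconst of a count-leading-zeros result computes. A quick check with math/bits:

package main

import (
    "fmt"
    "math/bits"
)

func main() {
    for _, x := range []uint64{0, 1, 255, 1 << 40} {
        // bits.Len64(x) == 64 - bits.LeadingZeros64(x), the form used above.
        fmt.Println(bits.Len64(x) == 64-bits.LeadingZeros64(x)) // always true
    }
}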
@ -3961,6 +3959,76 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
} }
break break
} }
// match: (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLD {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpPPC64SRD {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
break
}
// match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) // match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y) // result: (ROTLW x y)
for { for {
@ -4073,38 +4141,22 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool {
v.AddArg(x) v.AddArg(x)
return true return true
} }
return false // match: (ADDconst [c] (SUBFCconst [d] x))
} // cond: is32Bit(c+d)
func rewriteValuePPC64_OpPPC64ADDconstForCarry(v *Value) bool { // result: (SUBFCconst [c+d] x)
v_0 := v.Args[0]
// match: (ADDconstForCarry [c] (MOVDconst [d]))
// cond: c < 0 && (c < 0 || int64(c) + d >= 0)
// result: (FlagCarryClear)
for { for {
c := auxIntToInt16(v.AuxInt) c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64SUBFCconst {
break break
} }
d := auxIntToInt64(v_0.AuxInt) d := auxIntToInt64(v_0.AuxInt)
if !(c < 0 && (c < 0 || int64(c)+d >= 0)) { x := v_0.Args[0]
if !(is32Bit(c + d)) {
break break
} }
v.reset(OpPPC64FlagCarryClear) v.reset(OpPPC64SUBFCconst)
return true v.AuxInt = int64ToAuxInt(c + d)
} v.AddArg(x)
// match: (ADDconstForCarry [c] (MOVDconst [d]))
// cond: c < 0 && c >= 0 && int64(c) + d < 0
// result: (FlagCarrySet)
for {
c := auxIntToInt16(v.AuxInt)
if v_0.Op != OpPPC64MOVDconst {
break
}
d := auxIntToInt64(v_0.AuxInt)
if !(c < 0 && c >= 0 && int64(c)+d < 0) {
break
}
v.reset(OpPPC64FlagCarrySet)
return true return true
} }
return false return false
@ -10374,46 +10426,40 @@ func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool {
} }
return false return false
} }
func rewriteValuePPC64_OpPPC64MaskIfNotCarry(v *Value) bool { func rewriteValuePPC64_OpPPC64NEG(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) // match: (NEG (ADDconst [c] x))
// cond: c < 0 && d > 0 && int64(c) + d < 0 // cond: is32Bit(-c)
// result: (MOVDconst [-1]) // result: (SUBFCconst [-c] x)
for { for {
if v_0.Op != OpPPC64ADDconstForCarry { if v_0.Op != OpPPC64ADDconst {
break break
} }
c := auxIntToInt16(v_0.AuxInt) c := auxIntToInt64(v_0.AuxInt)
v_0_0 := v_0.Args[0] x := v_0.Args[0]
if v_0_0.Op != OpPPC64ANDconst { if !(is32Bit(-c)) {
break break
} }
d := auxIntToInt64(v_0_0.AuxInt) v.reset(OpPPC64SUBFCconst)
if !(c < 0 && d > 0 && int64(c)+d < 0) { v.AuxInt = int64ToAuxInt(-c)
break v.AddArg(x)
}
v.reset(OpPPC64MOVDconst)
v.AuxInt = int64ToAuxInt(-1)
return true return true
} }
// match: (MaskIfNotCarry (FlagCarrySet)) // match: (NEG (SUBFCconst [c] x))
// result: (MOVDconst [0]) // cond: is32Bit(-c)
// result: (ADDconst [-c] x)
for { for {
if v_0.Op != OpPPC64FlagCarrySet { if v_0.Op != OpPPC64SUBFCconst {
break break
} }
v.reset(OpPPC64MOVDconst) c := auxIntToInt64(v_0.AuxInt)
v.AuxInt = int64ToAuxInt(0) x := v_0.Args[0]
return true if !(is32Bit(-c)) {
}
// match: (MaskIfNotCarry (FlagCarryClear))
// result: (MOVDconst [-1])
for {
if v_0.Op != OpPPC64FlagCarryClear {
break break
} }
v.reset(OpPPC64MOVDconst) v.reset(OpPPC64ADDconst)
v.AuxInt = int64ToAuxInt(-1) v.AuxInt = int64ToAuxInt(-c)
v.AddArg(x)
return true return true
} }
return false return false
@ -10592,6 +10638,76 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
} }
break break
} }
// match: ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLD {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpPPC64SRD {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
break
}
// match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) // match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y) // result: (ROTLW x y)
for { for {
@ -12191,6 +12307,69 @@ func rewriteValuePPC64_OpPPC64SUB(v *Value) bool {
v.AddArg(x) v.AddArg(x)
return true return true
} }
// match: (SUB (MOVDconst [c]) x)
// cond: is32Bit(c)
// result: (SUBFCconst [c] x)
for {
if v_0.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
if !(is32Bit(c)) {
break
}
v.reset(OpPPC64SUBFCconst)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SUBFCconst [c] (NEG x))
// result: (ADDconst [c] x)
for {
c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64NEG {
break
}
x := v_0.Args[0]
v.reset(OpPPC64ADDconst)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (SUBFCconst [c] (SUBFCconst [d] x))
// cond: is32Bit(c-d)
// result: (ADDconst [c-d] x)
for {
c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpPPC64SUBFCconst {
break
}
d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(c - d)) {
break
}
v.reset(OpPPC64ADDconst)
v.AuxInt = int64ToAuxInt(c - d)
v.AddArg(x)
return true
}
// match: (SUBFCconst [0] x)
// result: (NEG x)
for {
if auxIntToInt64(v.AuxInt) != 0 {
break
}
x := v_0
v.reset(OpPPC64NEG)
v.AddArg(x)
return true
}
return false return false
} }
func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
@ -12286,6 +12465,76 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
} }
break break
} }
// match: (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLD {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpPPC64SRD {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
break
}
// match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) // match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y) // result: (ROTLW x y)
for { for {
@ -13257,6 +13506,28 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool {
v.AddArg2(x, v0) v.AddArg2(x, v0)
return true return true
} }
// match: (Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
// result: (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
for {
x := v_0
if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
break
}
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 {
break
}
y := v_1_0.Args[0]
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
v1.AuxInt = int64ToAuxInt(31)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg2(x, v0)
return true
}
// match: (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) // match: (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31]))))
// result: (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) // result: (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
for { for {
@ -13294,6 +13565,37 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool {
} }
break break
} }
// match: (Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31]))))
// result: (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
for {
x := v_0
if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
break
}
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
break
}
_ = v_1_0.Args[1]
v_1_0_0 := v_1_0.Args[0]
v_1_0_1 := v_1_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
y := v_1_0_0
if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 {
continue
}
v.reset(OpPPC64SRW)
v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
v1.AuxInt = int64ToAuxInt(31)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg2(x, v0)
return true
}
break
}
// match: (Rsh32Ux64 x y) // match: (Rsh32Ux64 x y)
// result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
for { for {
@ -13564,6 +13866,28 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool {
v.AddArg2(x, v0) v.AddArg2(x, v0)
return true return true
} }
// match: (Rsh32x64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
// result: (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
for {
x := v_0
if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
break
}
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 {
break
}
y := v_1_0.Args[0]
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
v1.AuxInt = int64ToAuxInt(31)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg2(x, v0)
return true
}
// match: (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) // match: (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31]))))
// result: (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) // result: (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
for { for {
@ -13601,6 +13925,37 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool {
} }
break break
} }
// match: (Rsh32x64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31]))))
// result: (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
for {
x := v_0
if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
break
}
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
break
}
_ = v_1_0.Args[1]
v_1_0_0 := v_1_0.Args[0]
v_1_0_1 := v_1_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
y := v_1_0_0
if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 {
continue
}
v.reset(OpPPC64SRAW)
v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
v0.AuxInt = int64ToAuxInt(32)
v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
v1.AuxInt = int64ToAuxInt(31)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg2(x, v0)
return true
}
break
}
// match: (Rsh32x64 x y) // match: (Rsh32x64 x y)
// result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
for { for {
@ -13869,6 +14224,28 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool {
v.AddArg2(x, v0) v.AddArg2(x, v0)
return true return true
} }
// match: (Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
// result: (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
for {
x := v_0
if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
break
}
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 {
break
}
y := v_1_0.Args[0]
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
v0.AuxInt = int64ToAuxInt(64)
v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
v1.AuxInt = int64ToAuxInt(63)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg2(x, v0)
return true
}
// match: (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) // match: (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63]))))
// result: (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) // result: (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
for { for {
@ -13906,6 +14283,37 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool {
} }
break break
} }
// match: (Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63]))))
// result: (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
for {
x := v_0
if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
break
}
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
break
}
_ = v_1_0.Args[1]
v_1_0_0 := v_1_0.Args[0]
v_1_0_1 := v_1_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
y := v_1_0_0
if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 {
continue
}
v.reset(OpPPC64SRD)
v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
v0.AuxInt = int64ToAuxInt(64)
v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
v1.AuxInt = int64ToAuxInt(63)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg2(x, v0)
return true
}
break
}
// match: (Rsh64Ux64 x y) // match: (Rsh64Ux64 x y)
// result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
for { for {
@ -14176,6 +14584,28 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool {
v.AddArg2(x, v0) v.AddArg2(x, v0)
return true return true
} }
// match: (Rsh64x64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
// result: (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
for {
x := v_0
if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
break
}
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 {
break
}
y := v_1_0.Args[0]
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
v0.AuxInt = int64ToAuxInt(64)
v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
v1.AuxInt = int64ToAuxInt(63)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg2(x, v0)
return true
}
// match: (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) // match: (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63]))))
// result: (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) // result: (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
for { for {
@ -14213,6 +14643,37 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool {
} }
break break
} }
// match: (Rsh64x64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63]))))
// result: (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
for {
x := v_0
if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
break
}
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
break
}
_ = v_1_0.Args[1]
v_1_0_0 := v_1_0.Args[0]
v_1_0_1 := v_1_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
y := v_1_0_0
if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 {
continue
}
v.reset(OpPPC64SRAD)
v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
v0.AuxInt = int64ToAuxInt(64)
v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
v1.AuxInt = int64ToAuxInt(63)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg2(x, v0)
return true
}
break
}
// match: (Rsh64x64 x y) // match: (Rsh64x64 x y)
// result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
for { for {
File diff suppressed because it is too large
@ -591,6 +591,8 @@ func rewriteValueWasm(v *Value) bool {
return rewriteValueWasm_OpWasmI64Eq(v) return rewriteValueWasm_OpWasmI64Eq(v)
case OpWasmI64Eqz: case OpWasmI64Eqz:
return rewriteValueWasm_OpWasmI64Eqz(v) return rewriteValueWasm_OpWasmI64Eqz(v)
case OpWasmI64LeU:
return rewriteValueWasm_OpWasmI64LeU(v)
case OpWasmI64Load: case OpWasmI64Load:
return rewriteValueWasm_OpWasmI64Load(v) return rewriteValueWasm_OpWasmI64Load(v)
case OpWasmI64Load16S: case OpWasmI64Load16S:
@ -605,6 +607,8 @@ func rewriteValueWasm(v *Value) bool {
return rewriteValueWasm_OpWasmI64Load8S(v) return rewriteValueWasm_OpWasmI64Load8S(v)
case OpWasmI64Load8U: case OpWasmI64Load8U:
return rewriteValueWasm_OpWasmI64Load8U(v) return rewriteValueWasm_OpWasmI64Load8U(v)
case OpWasmI64LtU:
return rewriteValueWasm_OpWasmI64LtU(v)
case OpWasmI64Mul: case OpWasmI64Mul:
return rewriteValueWasm_OpWasmI64Mul(v) return rewriteValueWasm_OpWasmI64Mul(v)
case OpWasmI64Ne: case OpWasmI64Ne:
@ -3824,6 +3828,37 @@ func rewriteValueWasm_OpWasmI64Eqz(v *Value) bool {
} }
return false return false
} }
func rewriteValueWasm_OpWasmI64LeU(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (I64LeU x (I64Const [0]))
// result: (I64Eqz x)
for {
x := v_0
if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
break
}
v.reset(OpWasmI64Eqz)
v.AddArg(x)
return true
}
// match: (I64LeU (I64Const [1]) x)
// result: (I64Eqz (I64Eqz x))
for {
if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
x := v_1
v.reset(OpWasmI64Eqz)
v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
v0.AddArg(x)
v.AddArg(v0)
return true
}
return false
}
func rewriteValueWasm_OpWasmI64Load(v *Value) bool { func rewriteValueWasm_OpWasmI64Load(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
@ -4070,6 +4105,37 @@ func rewriteValueWasm_OpWasmI64Load8U(v *Value) bool {
} }
return false return false
} }
func rewriteValueWasm_OpWasmI64LtU(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (I64LtU (I64Const [0]) x)
// result: (I64Eqz (I64Eqz x))
for {
if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
x := v_1
v.reset(OpWasmI64Eqz)
v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (I64LtU x (I64Const [1]))
// result: (I64Eqz x)
for {
x := v_0
if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 1 {
break
}
v.reset(OpWasmI64Eqz)
v.AddArg(x)
return true
}
return false
}
func rewriteValueWasm_OpWasmI64Mul(v *Value) bool { func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
@ -328,9 +328,10 @@ func rewriteValuedec_OpStore(v *Value) bool {
v.AddArg3(v0, len, v1) v.AddArg3(v0, len, v1)
return true return true
} }
// match: (Store dst (SliceMake ptr len cap) mem) // match: (Store {t} dst (SliceMake ptr len cap) mem)
// result: (Store {typ.Int} (OffPtr <typ.IntPtr> [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem))) // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {t.Elem().PtrTo()} dst ptr mem)))
for { for {
t := auxToType(v.Aux)
dst := v_0 dst := v_0
if v_1.Op != OpSliceMake { if v_1.Op != OpSliceMake {
break break
@ -350,7 +351,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
v2.AuxInt = int64ToAuxInt(config.PtrSize) v2.AuxInt = int64ToAuxInt(config.PtrSize)
v2.AddArg(dst) v2.AddArg(dst)
v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v3.Aux = typeToAux(typ.BytePtr) v3.Aux = typeToAux(t.Elem().PtrTo())
v3.AddArg3(dst, ptr, mem) v3.AddArg3(dst, ptr, mem)
v1.AddArg3(v2, len, v3) v1.AddArg3(v2, len, v3)
v.AddArg3(v0, cap, v1) v.AddArg3(v0, cap, v1)
File diff suppressed because it is too large
@ -18,6 +18,7 @@ func softfloat(f *Func) {
for _, b := range f.Blocks { for _, b := range f.Blocks {
for _, v := range b.Values { for _, v := range b.Values {
if v.Type.IsFloat() { if v.Type.IsFloat() {
f.unCache(v)
switch v.Op { switch v.Op {
case OpPhi, OpLoad, OpArg: case OpPhi, OpLoad, OpArg:
if v.Type.Size() == 4 { if v.Type.Size() == 4 {
@ -72,7 +73,7 @@ func softfloat(f *Func) {
if newInt64 && f.Config.RegSize == 4 { if newInt64 && f.Config.RegSize == 4 {
// On 32bit arch, decompose Uint64 introduced in the switch above. // On 32bit arch, decompose Uint64 introduced in the switch above.
decomposeBuiltIn(f) decomposeBuiltIn(f)
applyRewrite(f, rewriteBlockdec64, rewriteValuedec64) applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, removeDeadValues)
} }
} }
@ -54,6 +54,9 @@ type Value struct {
// nor a slot on Go stack, and the generation of this value is delayed to its use time. // nor a slot on Go stack, and the generation of this value is delayed to its use time.
OnWasmStack bool OnWasmStack bool
// Is this value in the per-function constant cache? If so, remove from cache before changing it or recycling it.
InCache bool
// Storage for the first three args // Storage for the first three args
argstorage [3]*Value argstorage [3]*Value
} }
@ -126,6 +129,13 @@ func (v *Value) AuxValAndOff() ValAndOff {
return ValAndOff(v.AuxInt) return ValAndOff(v.AuxInt)
} }
func (v *Value) AuxArm64BitField() arm64BitField {
if opcodeTable[v.Op].auxType != auxARM64BitField {
v.Fatalf("op %s doesn't have an ARM64BitField aux field", v.Op)
}
return arm64BitField(v.AuxInt)
}
// long form print. v# = opcode <type> [aux] args [: reg] (names) // long form print. v# = opcode <type> [aux] args [: reg] (names)
func (v *Value) LongString() string { func (v *Value) LongString() string {
s := fmt.Sprintf("v%d = %s", v.ID, v.Op) s := fmt.Sprintf("v%d = %s", v.ID, v.Op)
@ -176,8 +186,8 @@ func (v *Value) auxString() string {
case auxInt64, auxInt128: case auxInt64, auxInt128:
return fmt.Sprintf(" [%d]", v.AuxInt) return fmt.Sprintf(" [%d]", v.AuxInt)
case auxARM64BitField: case auxARM64BitField:
lsb := getARM64BFlsb(v.AuxInt) lsb := v.AuxArm64BitField().getARM64BFlsb()
width := getARM64BFwidth(v.AuxInt) width := v.AuxArm64BitField().getARM64BFwidth()
return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width) return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width)
case auxFloat32, auxFloat64: case auxFloat32, auxFloat64:
return fmt.Sprintf(" [%g]", v.AuxFloat()) return fmt.Sprintf(" [%g]", v.AuxFloat())
@ -203,7 +213,7 @@ func (v *Value) auxString() string {
} }
return s + fmt.Sprintf(" [%s]", v.AuxValAndOff()) return s + fmt.Sprintf(" [%s]", v.AuxValAndOff())
case auxCCop: case auxCCop:
return fmt.Sprintf(" {%s}", v.Aux.(Op)) return fmt.Sprintf(" {%s}", Op(v.AuxInt))
case auxS390XCCMask, auxS390XRotateParams: case auxS390XCCMask, auxS390XRotateParams:
return fmt.Sprintf(" {%v}", v.Aux) return fmt.Sprintf(" {%v}", v.Aux)
case auxFlagConstant: case auxFlagConstant:
@ -325,6 +335,9 @@ func (v *Value) resetArgs() {
// of cmd/compile by almost 10%, and slows it down. // of cmd/compile by almost 10%, and slows it down.
//go:noinline //go:noinline
func (v *Value) reset(op Op) { func (v *Value) reset(op Op) {
if v.InCache {
v.Block.Func.unCache(v)
}
v.Op = op v.Op = op
v.resetArgs() v.resetArgs()
v.AuxInt = 0 v.AuxInt = 0
@ -335,6 +348,9 @@ func (v *Value) reset(op Op) {
// It modifies v to be (Copy a). // It modifies v to be (Copy a).
//go:noinline //go:noinline
func (v *Value) copyOf(a *Value) { func (v *Value) copyOf(a *Value) {
if v.InCache {
v.Block.Func.unCache(v)
}
v.Op = OpCopy v.Op = OpCopy
v.resetArgs() v.resetArgs()
v.AddArg(a) v.AddArg(a)
@ -453,3 +469,23 @@ func (v *Value) LackingPos() bool {
return v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive || v.Op == OpPhi || return v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive || v.Op == OpPhi ||
(v.Op == OpFwdRef || v.Op == OpCopy) && v.Type == types.TypeMem (v.Op == OpFwdRef || v.Op == OpCopy) && v.Type == types.TypeMem
} }
// removeable reports whether the value v can be removed from the SSA graph entirely
// if its use count drops to 0.
func (v *Value) removeable() bool {
if v.Type.IsVoid() {
// Void ops, like nil pointer checks, must stay.
return false
}
if v.Type.IsMemory() {
// All memory ops aren't needed here, but we do need
// to keep calls at least (because they might have
// synchronization operations we can't see).
return false
}
if v.Op.HasSideEffects() {
// These are mostly synchronization operations.
return false
}
return true
}
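
The new InCache flag and the unCache calls added to reset and copyOf guard the per-function constant cache: a cached constant that is rewritten in place without first being dropped from the cache would keep being handed out under its old key. A toy sketch of that invariant (the names and types here are illustrative, not the compiler's actual ones):

package main

import "fmt"

type cacheKey struct {
	op  string
	aux int64
}

type toyValue struct {
	op      string
	auxInt  int64
	inCache bool
}

type toyFunc struct {
	constCache map[cacheKey]*toyValue
}

// constVal returns a shared value for (op, c), creating and caching it on first use.
func (f *toyFunc) constVal(op string, c int64) *toyValue {
	if f.constCache == nil {
		f.constCache = map[cacheKey]*toyValue{}
	}
	k := cacheKey{op, c}
	if v, ok := f.constCache[k]; ok {
		return v
	}
	v := &toyValue{op: op, auxInt: c, inCache: true}
	f.constCache[k] = v
	return v
}

// unCache drops v from the constant cache, mirroring the idea of Func.unCache.
func (f *toyFunc) unCache(v *toyValue) {
	delete(f.constCache, cacheKey{v.op, v.auxInt})
	v.inCache = false
}

// reset mirrors Value.reset: a cached constant must leave the cache before it
// is rewritten, or later constVal lookups would return a stale value.
func (v *toyValue) reset(f *toyFunc, op string) {
	if v.inCache {
		f.unCache(v)
	}
	v.op = op
	v.auxInt = 0
}

func main() {
	f := &toyFunc{}
	c := f.constVal("I64Const", 42)
	c.reset(f, "I64Eqz")                          // uncaches first
	fmt.Println(f.constVal("I64Const", 42) == c)  // false: a fresh constant is created
}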

@ -31,7 +31,7 @@ func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
if !ok { if !ok {
v.Fatalf("store aux is not a type: %s", v.LongString()) v.Fatalf("store aux is not a type: %s", v.LongString())
} }
if !t.HasHeapPointer() { if !t.HasPointers() {
return false return false
} }
if IsStackAddr(v.Args[0]) { if IsStackAddr(v.Args[0]) {

@ -1230,6 +1230,11 @@ func (t *Type) IsUnsafePtr() bool {
return t.Etype == TUNSAFEPTR return t.Etype == TUNSAFEPTR
} }
// IsUintptr reports whether t is an uintptr.
func (t *Type) IsUintptr() bool {
return t.Etype == TUINTPTR
}
// IsPtrShaped reports whether t is represented by a single machine pointer. // IsPtrShaped reports whether t is represented by a single machine pointer.
// In addition to regular Go pointer types, this includes map, channel, and // In addition to regular Go pointer types, this includes map, channel, and
// function types and unsafe.Pointer. It does not include array or struct types // function types and unsafe.Pointer. It does not include array or struct types
@ -1398,14 +1403,9 @@ func (t *Type) IsUntyped() bool {
return false return false
} }
// TODO(austin): We probably only need HasHeapPointer. See // HasPointers reports whether t contains a heap pointer.
// golang.org/cl/73412 for discussion. // Note that this function ignores pointers to go:notinheap types.
func (t *Type) HasPointers() bool {
func Haspointers(t *Type) bool {
return Haspointers1(t, false)
}
func Haspointers1(t *Type, ignoreNotInHeap bool) bool {
switch t.Etype { switch t.Etype {
case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA: TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA:
@ -1415,34 +1415,27 @@ func Haspointers1(t *Type, ignoreNotInHeap bool) bool {
if t.NumElem() == 0 { // empty array has no pointers if t.NumElem() == 0 { // empty array has no pointers
return false return false
} }
return Haspointers1(t.Elem(), ignoreNotInHeap) return t.Elem().HasPointers()
case TSTRUCT: case TSTRUCT:
for _, t1 := range t.Fields().Slice() { for _, t1 := range t.Fields().Slice() {
if Haspointers1(t1.Type, ignoreNotInHeap) { if t1.Type.HasPointers() {
return true return true
} }
} }
return false return false
case TPTR, TSLICE: case TPTR, TSLICE:
return !(ignoreNotInHeap && t.Elem().NotInHeap()) return !t.Elem().NotInHeap()
case TTUPLE: case TTUPLE:
ttup := t.Extra.(*Tuple) ttup := t.Extra.(*Tuple)
return Haspointers1(ttup.first, ignoreNotInHeap) || Haspointers1(ttup.second, ignoreNotInHeap) return ttup.first.HasPointers() || ttup.second.HasPointers()
} }
return true return true
} }
// HasHeapPointer reports whether t contains a heap pointer.
// This is used for write barrier insertion, so it ignores
// pointers to go:notinheap types.
func (t *Type) HasHeapPointer() bool {
return Haspointers1(t, true)
}
func (t *Type) Symbol() *obj.LSym { func (t *Type) Symbol() *obj.LSym {
return TypeLinkSym(t) return TypeLinkSym(t)
} }
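
The consolidated HasPointers is a plain recursion over the type structure. The same shape of recursion can be sketched against reflect types; this is only an analogue for illustration, since reflect has no notion of go:notinheap and that exception cannot be reproduced here:

package main

import (
	"fmt"
	"reflect"
)

// hasPointers mirrors the recursion in (*Type).HasPointers for ordinary Go types:
// scalars are pointer-free, arrays and structs recurse, and everything else
// (pointers, slices, maps, chans, funcs, interfaces, strings) contains a pointer.
func hasPointers(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Uintptr, reflect.Float32, reflect.Float64,
		reflect.Complex64, reflect.Complex128:
		return false
	case reflect.Array:
		return t.Len() > 0 && hasPointers(t.Elem())
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			if hasPointers(t.Field(i).Type) {
				return true
			}
		}
		return false
	default:
		return true
	}
}

func main() {
	type flat struct{ a, b int64 }
	type boxed struct{ s []byte }
	fmt.Println(hasPointers(reflect.TypeOf(flat{})))    // false
	fmt.Println(hasPointers(reflect.TypeOf(boxed{})))   // true
	fmt.Println(hasPointers(reflect.TypeOf([0]*int{}))) // false: empty array
}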
@ -1471,7 +1464,7 @@ func FakeRecvType() *Type {
} }
var ( var (
// TSSA types. Haspointers assumes these are pointer-free. // TSSA types. HasPointers assumes these are pointer-free.
TypeInvalid = newSSA("invalid") TypeInvalid = newSSA("invalid")
TypeMem = newSSA("mem") TypeMem = newSSA("mem")
TypeFlags = newSSA("flags") TypeFlags = newSSA("flags")

@ -261,8 +261,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
n.To.Reg = x86.REG_DX n.To.Reg = x86.REG_DX
} }
j.To.Val = n j.To.SetTarget(n)
j2.To.Val = s.Pc() j2.To.SetTarget(s.Pc())
} }
case ssa.Op386HMULL, ssa.Op386HMULLU: case ssa.Op386HMULL, ssa.Op386HMULLU:

@ -1106,8 +1106,9 @@ func (t *tester) cgoTest(dt *distTest) error {
cmd := t.addCmd(dt, "misc/cgo/test", t.goTest()) cmd := t.addCmd(dt, "misc/cgo/test", t.goTest())
cmd.Env = append(os.Environ(), "GOFLAGS=-ldflags=-linkmode=external") cmd.Env = append(os.Environ(), "GOFLAGS=-ldflags=-linkmode=external")
// A -g argument in CGO_CFLAGS should not affect how the test runs. // cgo should be able to cope with both -g arguments and colored
cmd.Env = append(cmd.Env, "CGO_CFLAGS=-g0") // diagnostics.
cmd.Env = append(cmd.Env, "CGO_CFLAGS=-g0 -fdiagnostics-color")
t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=auto") t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=auto")
t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=external") t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=external")

@ -7,13 +7,9 @@ package main
import ( import (
"fmt" "fmt"
"go/ast" "go/ast"
"go/parser"
"go/token" "go/token"
"os"
"path" "path"
"reflect"
"strconv" "strconv"
"strings"
) )
type fix struct { type fix struct {
@ -323,160 +319,12 @@ func declImports(gen *ast.GenDecl, path string) bool {
return false return false
} }
// isPkgDot reports whether t is the expression "pkg.name"
// where pkg is an imported identifier.
func isPkgDot(t ast.Expr, pkg, name string) bool {
sel, ok := t.(*ast.SelectorExpr)
return ok && isTopName(sel.X, pkg) && sel.Sel.String() == name
}
// isPtrPkgDot reports whether f is the expression "*pkg.name"
// where pkg is an imported identifier.
func isPtrPkgDot(t ast.Expr, pkg, name string) bool {
ptr, ok := t.(*ast.StarExpr)
return ok && isPkgDot(ptr.X, pkg, name)
}
// isTopName reports whether n is a top-level unresolved identifier with the given name. // isTopName reports whether n is a top-level unresolved identifier with the given name.
func isTopName(n ast.Expr, name string) bool { func isTopName(n ast.Expr, name string) bool {
id, ok := n.(*ast.Ident) id, ok := n.(*ast.Ident)
return ok && id.Name == name && id.Obj == nil return ok && id.Name == name && id.Obj == nil
} }
// isName reports whether n is an identifier with the given name.
func isName(n ast.Expr, name string) bool {
id, ok := n.(*ast.Ident)
return ok && id.String() == name
}
// isCall reports whether t is a call to pkg.name.
func isCall(t ast.Expr, pkg, name string) bool {
call, ok := t.(*ast.CallExpr)
return ok && isPkgDot(call.Fun, pkg, name)
}
// If n is an *ast.Ident, isIdent returns it; otherwise isIdent returns nil.
func isIdent(n interface{}) *ast.Ident {
id, _ := n.(*ast.Ident)
return id
}
// refersTo reports whether n is a reference to the same object as x.
func refersTo(n ast.Node, x *ast.Ident) bool {
id, ok := n.(*ast.Ident)
// The test of id.Name == x.Name handles top-level unresolved
// identifiers, which all have Obj == nil.
return ok && id.Obj == x.Obj && id.Name == x.Name
}
// isBlank reports whether n is the blank identifier.
func isBlank(n ast.Expr) bool {
return isName(n, "_")
}
// isEmptyString reports whether n is an empty string literal.
func isEmptyString(n ast.Expr) bool {
lit, ok := n.(*ast.BasicLit)
return ok && lit.Kind == token.STRING && len(lit.Value) == 2
}
func warn(pos token.Pos, msg string, args ...interface{}) {
if pos.IsValid() {
msg = "%s: " + msg
arg1 := []interface{}{fset.Position(pos).String()}
args = append(arg1, args...)
}
fmt.Fprintf(os.Stderr, msg+"\n", args...)
}
// countUses returns the number of uses of the identifier x in scope.
func countUses(x *ast.Ident, scope []ast.Stmt) int {
count := 0
ff := func(n interface{}) {
if n, ok := n.(ast.Node); ok && refersTo(n, x) {
count++
}
}
for _, n := range scope {
walk(n, ff)
}
return count
}
// rewriteUses replaces all uses of the identifier x and !x in scope
// with f(x.Pos()) and fnot(x.Pos()).
func rewriteUses(x *ast.Ident, f, fnot func(token.Pos) ast.Expr, scope []ast.Stmt) {
var lastF ast.Expr
ff := func(n interface{}) {
ptr, ok := n.(*ast.Expr)
if !ok {
return
}
nn := *ptr
// The child node was just walked and possibly replaced.
// If it was replaced and this is a negation, replace with fnot(p).
not, ok := nn.(*ast.UnaryExpr)
if ok && not.Op == token.NOT && not.X == lastF {
*ptr = fnot(nn.Pos())
return
}
if refersTo(nn, x) {
lastF = f(nn.Pos())
*ptr = lastF
}
}
for _, n := range scope {
walk(n, ff)
}
}
// assignsTo reports whether any of the code in scope assigns to or takes the address of x.
func assignsTo(x *ast.Ident, scope []ast.Stmt) bool {
assigned := false
ff := func(n interface{}) {
if assigned {
return
}
switch n := n.(type) {
case *ast.UnaryExpr:
// use of &x
if n.Op == token.AND && refersTo(n.X, x) {
assigned = true
return
}
case *ast.AssignStmt:
for _, l := range n.Lhs {
if refersTo(l, x) {
assigned = true
return
}
}
}
}
for _, n := range scope {
if assigned {
break
}
walk(n, ff)
}
return assigned
}
// newPkgDot returns an ast.Expr referring to "pkg.name" at position pos.
func newPkgDot(pos token.Pos, pkg, name string) ast.Expr {
return &ast.SelectorExpr{
X: &ast.Ident{
NamePos: pos,
Name: pkg,
},
Sel: &ast.Ident{
NamePos: pos,
Name: name,
},
}
}
// renameTop renames all references to the top-level name old. // renameTop renames all references to the top-level name old.
// It reports whether it makes any changes. // It reports whether it makes any changes.
func renameTop(f *ast.File, old, new string) bool { func renameTop(f *ast.File, old, new string) bool {
@ -707,143 +555,3 @@ func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) {
} }
return return
} }
func usesImport(f *ast.File, path string) (used bool) {
spec := importSpec(f, path)
if spec == nil {
return
}
name := spec.Name.String()
switch name {
case "<nil>":
// If the package name is not explicitly specified,
// make an educated guess. This is not guaranteed to be correct.
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 {
name = path
} else {
name = path[lastSlash+1:]
}
case "_", ".":
// Not sure if this import is used - err on the side of caution.
return true
}
walk(f, func(n interface{}) {
sel, ok := n.(*ast.SelectorExpr)
if ok && isTopName(sel.X, name) {
used = true
}
})
return
}
func expr(s string) ast.Expr {
x, err := parser.ParseExpr(s)
if err != nil {
panic("parsing " + s + ": " + err.Error())
}
// Remove position information to avoid spurious newlines.
killPos(reflect.ValueOf(x))
return x
}
var posType = reflect.TypeOf(token.Pos(0))
func killPos(v reflect.Value) {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
if !v.IsNil() {
killPos(v.Elem())
}
case reflect.Slice:
n := v.Len()
for i := 0; i < n; i++ {
killPos(v.Index(i))
}
case reflect.Struct:
n := v.NumField()
for i := 0; i < n; i++ {
f := v.Field(i)
if f.Type() == posType {
f.SetInt(0)
continue
}
killPos(f)
}
}
}
// A Rename describes a single renaming.
type rename struct {
OldImport string // only apply rename if this import is present
NewImport string // add this import during rewrite
Old string // old name: p.T or *p.T
New string // new name: p.T or *p.T
}
func renameFix(tab []rename) func(*ast.File) bool {
return func(f *ast.File) bool {
return renameFixTab(f, tab)
}
}
func parseName(s string) (ptr bool, pkg, nam string) {
i := strings.Index(s, ".")
if i < 0 {
panic("parseName: invalid name " + s)
}
if strings.HasPrefix(s, "*") {
ptr = true
s = s[1:]
i--
}
pkg = s[:i]
nam = s[i+1:]
return
}
func renameFixTab(f *ast.File, tab []rename) bool {
fixed := false
added := map[string]bool{}
check := map[string]bool{}
for _, t := range tab {
if !imports(f, t.OldImport) {
continue
}
optr, opkg, onam := parseName(t.Old)
walk(f, func(n interface{}) {
np, ok := n.(*ast.Expr)
if !ok {
return
}
x := *np
if optr {
p, ok := x.(*ast.StarExpr)
if !ok {
return
}
x = p.X
}
if !isPkgDot(x, opkg, onam) {
return
}
if t.NewImport != "" && !added[t.NewImport] {
addImport(f, t.NewImport)
added[t.NewImport] = true
}
*np = expr(t.New)
check[t.OldImport] = true
fixed = true
})
}
for ipath := range check {
if !usesImport(f, ipath) {
deleteImport(f, ipath)
}
}
return fixed
}

@ -7,8 +7,8 @@ require (
github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 // indirect github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 // indirect
golang.org/x/arch v0.0.0-20200511175325-f7c78586839d golang.org/x/arch v0.0.0-20200511175325-f7c78586839d
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/mod v0.3.1-0.20200625141748-0b26df4a2231 golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449
golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 // indirect golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 // indirect
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 golang.org/x/tools v0.0.0-20200901153117-6e59e24738da
golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 // indirect golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 // indirect
) )

@ -6,34 +6,34 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 h1:S1+yTUaFPXuDZnPDbO+TrDFIjPzQraYH8/CwSlu9Fac= github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 h1:S1+yTUaFPXuDZnPDbO+TrDFIjPzQraYH8/CwSlu9Fac=
github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/arch v0.0.0-20200511175325-f7c78586839d h1:YvwchuJby5xEAPdBGmdAVSiVME50C+RJfJJwJJsGEV8= golang.org/x/arch v0.0.0-20200511175325-f7c78586839d h1:YvwchuJby5xEAPdBGmdAVSiVME50C+RJfJJwJJsGEV8=
golang.org/x/arch v0.0.0-20200511175325-f7c78586839d/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= golang.org/x/arch v0.0.0-20200511175325-f7c78586839d/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.1-0.20200625141748-0b26df4a2231 h1:R11LxkoUvECaAHdM5/ZOevSR7n+016EgTw8nbE1l+XM= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ=
golang.org/x/mod v0.3.1-0.20200625141748-0b26df4a2231/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 h1:5B6i6EAiSYyejWfvc5Rc9BbI3rzIsrrXfAQBWnYfn+w= golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 h1:5B6i6EAiSYyejWfvc5Rc9BbI3rzIsrrXfAQBWnYfn+w=
golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= golang.org/x/tools v0.0.0-20200901153117-6e59e24738da h1:8nFbt74voFOsM+Hb5XtF+1SNbbf3dzikH5osZO1hyyo=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200901153117-6e59e24738da/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 h1:Jhw4VC65LaKnpq9FvcK+a8ZzrFm3D+UygvMMrhkOw70= golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316 h1:Jhw4VC65LaKnpq9FvcK+a8ZzrFm3D+UygvMMrhkOw70=
golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200806184451-1a77d5e9f316/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

@ -916,6 +916,7 @@
// Dir string // directory holding files for this module, if any // Dir string // directory holding files for this module, if any
// GoMod string // path to go.mod file used when loading this module, if any // GoMod string // path to go.mod file used when loading this module, if any
// GoVersion string // go version used in module // GoVersion string // go version used in module
// Retracted string // retraction information, if any (with -retracted or -u)
// Error *ModuleError // error loading module // Error *ModuleError // error loading module
// } // }
// //
@ -947,14 +948,16 @@
// The -u flag adds information about available upgrades. // The -u flag adds information about available upgrades.
// When the latest version of a given module is newer than // When the latest version of a given module is newer than
// the current one, list -u sets the Module's Update field // the current one, list -u sets the Module's Update field
// to information about the newer module. // to information about the newer module. list -u will also set
// the module's Retracted field if the current version is retracted.
// The Module's String method indicates an available upgrade by // The Module's String method indicates an available upgrade by
// formatting the newer version in brackets after the current version. // formatting the newer version in brackets after the current version.
// If a version is retracted, the string "(retracted)" will follow it.
// For example, 'go list -m -u all' might print: // For example, 'go list -m -u all' might print:
// //
// my/main/module // my/main/module
// golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text // golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text
// rsc.io/pdf v0.1.1 [v0.1.2] // rsc.io/pdf v0.1.1 (retracted) [v0.1.2]
// //
// (For tools, 'go list -m -u -json all' may be more convenient to parse.) // (For tools, 'go list -m -u -json all' may be more convenient to parse.)
// //
@ -964,6 +967,14 @@
// the default output format to display the module path followed by the // the default output format to display the module path followed by the
// space-separated version list. // space-separated version list.
// //
// The -retracted flag causes list to report information about retracted
// module versions. When -retracted is used with -f or -json, the Retracted
// field will be set to a string explaining why the version was retracted.
// The string is taken from comments on the retract directive in the
// module's go.mod file. When -retracted is used with -versions, retracted
// versions are listed together with unretracted versions. The -retracted
// flag may be used with or without -m.
//
// The arguments to list -m are interpreted as a list of modules, not packages. // The arguments to list -m are interpreted as a list of modules, not packages.
// The main module is the module containing the current directory. // The main module is the module containing the current directory.
// The active modules are the main module and its dependencies. // The active modules are the main module and its dependencies.
@ -1100,9 +1111,14 @@
// module path and version pair. If the @v is omitted, a replacement without // module path and version pair. If the @v is omitted, a replacement without
// a version on the left side is dropped. // a version on the left side is dropped.
// //
// The -retract=version and -dropretract=version flags add and drop a
// retraction on the given version. The version may be a single version
// like "v1.2.3" or a closed interval like "[v1.1.0-v1.1.9]". Note that
// -retract=version is a no-op if that retraction already exists.
//
// The -require, -droprequire, -exclude, -dropexclude, -replace, // The -require, -droprequire, -exclude, -dropexclude, -replace,
// and -dropreplace editing flags may be repeated, and the changes // -dropreplace, -retract, and -dropretract editing flags may be repeated,
// are applied in the order given. // and the changes are applied in the order given.
// //
// The -go=version flag sets the expected Go language version. // The -go=version flag sets the expected Go language version.
// //
@ -1136,6 +1152,15 @@
// New Module // New Module
// } // }
// //
// type Retract struct {
// Low string
// High string
// Rationale string
// }
//
// Retract entries representing a single version (not an interval) will have
// the "Low" and "High" fields set to the same value.
//
// Note that this only describes the go.mod file itself, not other modules // Note that this only describes the go.mod file itself, not other modules
// referred to indirectly. For the full set of modules available to a build, // referred to indirectly. For the full set of modules available to a build,
// use 'go list -m -json all'. // use 'go list -m -json all'.
@ -1894,15 +1919,17 @@
// require new/thing/v2 v2.3.4 // require new/thing/v2 v2.3.4
// exclude old/thing v1.2.3 // exclude old/thing v1.2.3
// replace bad/thing v1.4.5 => good/thing v1.4.5 // replace bad/thing v1.4.5 => good/thing v1.4.5
// retract v1.5.6
// //
// The verbs are // The verbs are
// module, to define the module path; // module, to define the module path;
// go, to set the expected language version; // go, to set the expected language version;
// require, to require a particular module at a given version or later; // require, to require a particular module at a given version or later;
// exclude, to exclude a particular module version from use; and // exclude, to exclude a particular module version from use;
// replace, to replace a module version with a different module version. // replace, to replace a module version with a different module version; and
// retract, to indicate a previously released version should not be used.
// Exclude and replace apply only in the main module's go.mod and are ignored // Exclude and replace apply only in the main module's go.mod and are ignored
// in dependencies. See https://research.swtch.com/vgo-mvs for details. // in dependencies. See https://golang.org/ref/mod for details.
// //
// The leading verb can be factored out of adjacent lines to create a block, // The leading verb can be factored out of adjacent lines to create a block,
// like in Go imports: // like in Go imports:
@ -2145,7 +2172,10 @@
// before resolving dependencies or building the code. // before resolving dependencies or building the code.
// //
// The -insecure flag permits fetching from repositories and resolving // The -insecure flag permits fetching from repositories and resolving
// custom domains using insecure schemes such as HTTP. Use with caution. // custom domains using insecure schemes such as HTTP. Use with caution. The
// GOINSECURE environment variable is usually a better alternative, since it
// provides control over which modules may be retrieved using an insecure scheme.
// See 'go help environment' for details.
// //
// The -t flag instructs get to also download the packages required to build // The -t flag instructs get to also download the packages required to build
// the tests for the specified packages. // the tests for the specified packages.
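
As a concrete illustration of the retract directive documented above, the sketch below parses a go.mod containing retractions with golang.org/x/mod/modfile, assuming the pre-release x/mod version this change vendors (which carries retract support); the module path and versions are made up, and the rationale is taken from the comment on each entry:

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

const gomod = `module example.com/hypothetical

go 1.15

retract (
	v1.0.1 // checksum mismatch
	[v1.1.0, v1.1.9] // severe bug, fixed in v1.2.0
)
`

func main() {
	f, err := modfile.Parse("go.mod", []byte(gomod), nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range f.Retract {
		fmt.Printf("retracted [%s, %s]: %s\n", r.Low, r.High, r.Rationale)
	}
}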

@ -28,13 +28,42 @@ func (v *StringsFlag) String() string {
return "<StringsFlag>" return "<StringsFlag>"
} }
// explicitStringFlag is like a regular string flag, but it also tracks whether
// the string was set explicitly to a non-empty value.
type explicitStringFlag struct {
value *string
explicit *bool
}
func (f explicitStringFlag) String() string {
if f.value == nil {
return ""
}
return *f.value
}
func (f explicitStringFlag) Set(v string) error {
*f.value = v
if v != "" {
*f.explicit = true
}
return nil
}
// AddBuildFlagsNX adds the -n and -x build flags to the flag set. // AddBuildFlagsNX adds the -n and -x build flags to the flag set.
func AddBuildFlagsNX(flags *flag.FlagSet) { func AddBuildFlagsNX(flags *flag.FlagSet) {
flags.BoolVar(&cfg.BuildN, "n", false, "") flags.BoolVar(&cfg.BuildN, "n", false, "")
flags.BoolVar(&cfg.BuildX, "x", false, "") flags.BoolVar(&cfg.BuildX, "x", false, "")
} }
// AddLoadFlags adds the -mod build flag to the flag set. // AddModFlag adds the -mod build flag to the flag set.
func AddLoadFlags(flags *flag.FlagSet) { func AddModFlag(flags *flag.FlagSet) {
flags.StringVar(&cfg.BuildMod, "mod", "", "") flags.Var(explicitStringFlag{value: &cfg.BuildMod, explicit: &cfg.BuildModExplicit}, "mod", "")
}
// AddModCommonFlags adds the module-related flags common to build commands
// and 'go mod' subcommands.
func AddModCommonFlags(flags *flag.FlagSet) {
flags.BoolVar(&cfg.ModCacheRW, "modcacherw", false, "")
flags.StringVar(&cfg.ModFile, "modfile", "", "")
} }
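
A short self-contained sketch of how the explicitStringFlag pattern behaves with the standard flag package; the helper is restated here only because the original is unexported inside cmd/go:

package main

import (
	"flag"
	"fmt"
)

type explicitStringFlag struct {
	value    *string
	explicit *bool
}

func (f explicitStringFlag) String() string {
	if f.value == nil {
		return "" // called on the zero value when flag prints defaults
	}
	return *f.value
}

func (f explicitStringFlag) Set(v string) error {
	*f.value = v
	if v != "" {
		*f.explicit = true
	}
	return nil
}

func main() {
	var buildMod string
	var buildModExplicit bool
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	fs.Var(explicitStringFlag{value: &buildMod, explicit: &buildModExplicit}, "mod", "")

	fs.Parse([]string{"-mod=vendor"})
	fmt.Println(buildMod, buildModExplicit) // "vendor true": -mod was set explicitly
}

With this in place, cfg.BuildModExplicit lets the go command distinguish an explicit -mod setting from a default that was computed for some other reason.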

@ -27,7 +27,8 @@ var (
BuildBuildmode string // -buildmode flag BuildBuildmode string // -buildmode flag
BuildContext = defaultContext() BuildContext = defaultContext()
BuildMod string // -mod flag BuildMod string // -mod flag
BuildModReason string // reason -mod flag is set, if set by default BuildModExplicit bool // whether -mod was set explicitly
BuildModReason string // reason -mod was set, if set by default
BuildI bool // -i flag BuildI bool // -i flag
BuildLinkshared bool // -linkshared flag BuildLinkshared bool // -linkshared flag
BuildMSan bool // -msan flag BuildMSan bool // -msan flag

@ -23,7 +23,8 @@ import (
func init() { func init() {
base.AddBuildFlagsNX(&CmdFmt.Flag) base.AddBuildFlagsNX(&CmdFmt.Flag)
base.AddLoadFlags(&CmdFmt.Flag) base.AddModFlag(&CmdFmt.Flag)
base.AddModCommonFlags(&CmdFmt.Flag)
} }
var CmdFmt = &base.Command{ var CmdFmt = &base.Command{

@ -20,6 +20,8 @@ import (
"cmd/go/internal/str" "cmd/go/internal/str"
"cmd/go/internal/web" "cmd/go/internal/web"
"cmd/go/internal/work" "cmd/go/internal/work"
"golang.org/x/mod/module"
) )
var CmdGet = &base.Command{ var CmdGet = &base.Command{
@ -41,7 +43,10 @@ The -fix flag instructs get to run the fix tool on the downloaded packages
before resolving dependencies or building the code. before resolving dependencies or building the code.
The -insecure flag permits fetching from repositories and resolving The -insecure flag permits fetching from repositories and resolving
custom domains using insecure schemes such as HTTP. Use with caution. custom domains using insecure schemes such as HTTP. Use with caution. The
GOINSECURE environment variable is usually a better alternative, since it
provides control over which modules may be retrieved using an insecure scheme.
See 'go help environment' for details.
The -t flag instructs get to also download the packages required to build The -t flag instructs get to also download the packages required to build
the tests for the specified packages. the tests for the specified packages.
@ -409,11 +414,6 @@ func downloadPackage(p *load.Package) error {
blindRepo bool // set if the repo has unusual configuration blindRepo bool // set if the repo has unusual configuration
) )
security := web.SecureOnly
if Insecure {
security = web.Insecure
}
// p can be either a real package, or a pseudo-package whose “import path” is // p can be either a real package, or a pseudo-package whose “import path” is
// actually a wildcard pattern. // actually a wildcard pattern.
// Trim the path at the element containing the first wildcard, // Trim the path at the element containing the first wildcard,
@ -427,9 +427,13 @@ func downloadPackage(p *load.Package) error {
} }
importPrefix = importPrefix[:slash] importPrefix = importPrefix[:slash]
} }
if err := CheckImportPath(importPrefix); err != nil { if err := module.CheckImportPath(importPrefix); err != nil {
return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err) return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err)
} }
security := web.SecureOnly
if Insecure || module.MatchPrefixPatterns(cfg.GOINSECURE, importPrefix) {
security = web.Insecure
}
if p.Internal.Build.SrcRoot != "" { if p.Internal.Build.SrcRoot != "" {
// Directory exists. Look for checkout along path to src. // Directory exists. Look for checkout along path to src.
@ -473,7 +477,7 @@ func downloadPackage(p *load.Package) error {
} }
vcs, repo, rootPath = rr.vcs, rr.Repo, rr.Root vcs, repo, rootPath = rr.vcs, rr.Repo, rr.Root
} }
if !blindRepo && !vcs.isSecure(repo) && !Insecure { if !blindRepo && !vcs.isSecure(repo) && security != web.Insecure {
return fmt.Errorf("cannot download, %v uses insecure protocol", repo) return fmt.Errorf("cannot download, %v uses insecure protocol", repo)
} }
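
The rewritten security check leans on two helpers from golang.org/x/mod/module. A hedged sketch of their behavior; the GOINSECURE value and paths below are made-up examples:

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// GOINSECURE holds comma-separated glob patterns matched against path prefixes.
	goinsecure := "example.com,*.corp.invalid"
	for _, p := range []string{
		"example.com/legacy/pkg",
		"git.corp.invalid/tools/cli",
		"github.com/user/repo",
	} {
		fmt.Println(p, module.MatchPrefixPatterns(goinsecure, p))
	}

	// module.CheckImportPath replaces the vendored copy deleted below; unlike
	// that copy, it does not accept arbitrary Unicode letters in path elements.
	fmt.Println(module.CheckImportPath("github.com/user/unicode/испытание"))
}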

@ -1,192 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package get
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// The following functions are copied verbatim from golang.org/x/mod/module/module.go,
// with a change to additionally reject Windows short-names,
// and one to accept arbitrary letters (golang.org/issue/29101).
//
// TODO(bcmills): After the call site for this function is backported,
// consolidate this back down to a single copy.
//
// NOTE: DO NOT MERGE THESE UNTIL WE DECIDE ABOUT ARBITRARY LETTERS IN MODULE MODE.
// CheckImportPath checks that an import path is valid.
func CheckImportPath(path string) error {
if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed import path %q: %v", path, err)
}
return nil
}
// checkPath checks that a general path is valid.
// It returns an error describing why but not mentioning path.
// Because these checks apply to both module paths and import paths,
// the caller is expected to add the "malformed ___ path %q: " prefix.
// fileName indicates whether the final element of the path is a file name
// (as opposed to a directory name).
func checkPath(path string, fileName bool) error {
if !utf8.ValidString(path) {
return fmt.Errorf("invalid UTF-8")
}
if path == "" {
return fmt.Errorf("empty string")
}
if path[0] == '-' {
return fmt.Errorf("leading dash")
}
if strings.Contains(path, "//") {
return fmt.Errorf("double slash")
}
if path[len(path)-1] == '/' {
return fmt.Errorf("trailing slash")
}
elemStart := 0
for i, r := range path {
if r == '/' {
if err := checkElem(path[elemStart:i], fileName); err != nil {
return err
}
elemStart = i + 1
}
}
if err := checkElem(path[elemStart:], fileName); err != nil {
return err
}
return nil
}
// checkElem checks whether an individual path element is valid.
// fileName indicates whether the element is a file name (not a directory name).
func checkElem(elem string, fileName bool) error {
if elem == "" {
return fmt.Errorf("empty path element")
}
if strings.Count(elem, ".") == len(elem) {
return fmt.Errorf("invalid path element %q", elem)
}
if elem[0] == '.' && !fileName {
return fmt.Errorf("leading dot in path element")
}
if elem[len(elem)-1] == '.' {
return fmt.Errorf("trailing dot in path element")
}
charOK := pathOK
if fileName {
charOK = fileNameOK
}
for _, r := range elem {
if !charOK(r) {
return fmt.Errorf("invalid char %q", r)
}
}
// Windows disallows a bunch of path elements, sadly.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
short := elem
if i := strings.Index(short, "."); i >= 0 {
short = short[:i]
}
for _, bad := range badWindowsNames {
if strings.EqualFold(bad, short) {
return fmt.Errorf("disallowed path element %q", elem)
}
}
// Reject path components that look like Windows short-names.
// Those usually end in a tilde followed by one or more ASCII digits.
if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 {
suffix := short[tilde+1:]
suffixIsDigits := true
for _, r := range suffix {
if r < '0' || r > '9' {
suffixIsDigits = false
break
}
}
if suffixIsDigits {
return fmt.Errorf("trailing tilde and digits in path element")
}
}
return nil
}
// pathOK reports whether r can appear in an import path element.
//
// NOTE: This function DIVERGES from module mode pathOK by accepting Unicode letters.
func pathOK(r rune) bool {
if r < utf8.RuneSelf {
return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
'0' <= r && r <= '9' ||
'A' <= r && r <= 'Z' ||
'a' <= r && r <= 'z'
}
return unicode.IsLetter(r)
}
// fileNameOK reports whether r can appear in a file name.
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions.
// See note about "safe encoding" below.
func fileNameOK(r rune) bool {
if r < utf8.RuneSelf {
// Entire set of ASCII punctuation, from which we remove characters:
// ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
// We disallow some shell special characters: " ' * < > ? ` |
// (Note that some of those are disallowed by the Windows file system as well.)
// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
// We allow spaces (U+0020) in file names.
const allowed = "!#$%&()+,-.=@[]^_{}~ "
if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
return true
}
for i := 0; i < len(allowed); i++ {
if rune(allowed[i]) == r {
return true
}
}
return false
}
// It may be OK to add more ASCII punctuation here, but only carefully.
// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
return unicode.IsLetter(r)
}
// badWindowsNames are the reserved file path elements on Windows.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
var badWindowsNames = []string{
"CON",
"PRN",
"AUX",
"NUL",
"COM1",
"COM2",
"COM3",
"COM4",
"COM5",
"COM6",
"COM7",
"COM8",
"COM9",
"LPT1",
"LPT2",
"LPT3",
"LPT4",
"LPT5",
"LPT6",
"LPT7",
"LPT8",
"LPT9",
}

@ -1027,7 +1027,7 @@ var vcsPaths = []*vcsPath{
// Github // Github
{ {
prefix: "github.com/", prefix: "github.com/",
regexp: lazyregexp.New(`^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[\p{L}0-9_.\-]+)*$`), regexp: lazyregexp.New(`^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`),
vcs: "git", vcs: "git",
repo: "https://{root}", repo: "https://{root}",
check: noVCSSuffix, check: noVCSSuffix,

@ -32,13 +32,10 @@ func TestRepoRootForImportPath(t *testing.T) {
Repo: "https://github.com/golang/groupcache", Repo: "https://github.com/golang/groupcache",
}, },
}, },
// Unicode letters in directories (issue 18660). // Unicode letters in directories are not valid.
{ {
"github.com/user/unicode/испытание", "github.com/user/unicode/испытание",
&RepoRoot{ nil,
vcs: vcsGit,
Repo: "https://github.com/user/unicode",
},
}, },
// IBM DevOps Services tests // IBM DevOps Services tests
{ {

@ -10,6 +10,7 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"os" "os"
"sort" "sort"
@ -215,6 +216,7 @@ applied to a Go struct, but now a Module struct:
Dir string // directory holding files for this module, if any Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file used when loading this module, if any GoMod string // path to go.mod file used when loading this module, if any
GoVersion string // go version used in module GoVersion string // go version used in module
Retracted string // retraction information, if any (with -retracted or -u)
Error *ModuleError // error loading module Error *ModuleError // error loading module
} }
@ -246,14 +248,16 @@ the replaced source code.)
The -u flag adds information about available upgrades. The -u flag adds information about available upgrades.
When the latest version of a given module is newer than When the latest version of a given module is newer than
the current one, list -u sets the Module's Update field the current one, list -u sets the Module's Update field
to information about the newer module. to information about the newer module. list -u will also set
the module's Retracted field if the current version is retracted.
The Module's String method indicates an available upgrade by The Module's String method indicates an available upgrade by
formatting the newer version in brackets after the current version. formatting the newer version in brackets after the current version.
If a version is retracted, the string "(retracted)" will follow it.
For example, 'go list -m -u all' might print: For example, 'go list -m -u all' might print:
my/main/module my/main/module
golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text
rsc.io/pdf v0.1.1 [v0.1.2] rsc.io/pdf v0.1.1 (retracted) [v0.1.2]
(For tools, 'go list -m -u -json all' may be more convenient to parse.) (For tools, 'go list -m -u -json all' may be more convenient to parse.)
@ -263,6 +267,14 @@ to semantic versioning, earliest to latest. The flag also changes
the default output format to display the module path followed by the the default output format to display the module path followed by the
space-separated version list. space-separated version list.
The -retracted flag causes list to report information about retracted
module versions. When -retracted is used with -f or -json, the Retracted
field will be set to a string explaining why the version was retracted.
The string is taken from comments on the retract directive in the
module's go.mod file. When -retracted is used with -versions, retracted
versions are listed together with unretracted versions. The -retracted
flag may be used with or without -m.
The arguments to list -m are interpreted as a list of modules, not packages. The arguments to list -m are interpreted as a list of modules, not packages.
The main module is the module containing the current directory. The main module is the module containing the current directory.
The active modules are the main module and its dependencies. The active modules are the main module and its dependencies.
@ -296,17 +308,18 @@ func init() {
} }
var ( var (
listCompiled = CmdList.Flag.Bool("compiled", false, "") listCompiled = CmdList.Flag.Bool("compiled", false, "")
listDeps = CmdList.Flag.Bool("deps", false, "") listDeps = CmdList.Flag.Bool("deps", false, "")
listE = CmdList.Flag.Bool("e", false, "") listE = CmdList.Flag.Bool("e", false, "")
listExport = CmdList.Flag.Bool("export", false, "") listExport = CmdList.Flag.Bool("export", false, "")
listFmt = CmdList.Flag.String("f", "", "") listFmt = CmdList.Flag.String("f", "", "")
listFind = CmdList.Flag.Bool("find", false, "") listFind = CmdList.Flag.Bool("find", false, "")
listJson = CmdList.Flag.Bool("json", false, "") listJson = CmdList.Flag.Bool("json", false, "")
listM = CmdList.Flag.Bool("m", false, "") listM = CmdList.Flag.Bool("m", false, "")
listU = CmdList.Flag.Bool("u", false, "") listRetracted = CmdList.Flag.Bool("retracted", false, "")
listTest = CmdList.Flag.Bool("test", false, "") listTest = CmdList.Flag.Bool("test", false, "")
listVersions = CmdList.Flag.Bool("versions", false, "") listU = CmdList.Flag.Bool("u", false, "")
listVersions = CmdList.Flag.Bool("versions", false, "")
) )
var nl = []byte{'\n'} var nl = []byte{'\n'}
@ -367,6 +380,16 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
} }
} }
modload.Init()
if *listRetracted {
if cfg.BuildMod == "vendor" {
base.Fatalf("go list -retracted cannot be used when vendoring is enabled")
}
if !modload.Enabled() {
base.Fatalf("go list -retracted can only be used in module-aware mode")
}
}
if *listM { if *listM {
// Module mode. // Module mode.
if *listCompiled { if *listCompiled {
@ -414,9 +437,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
} }
} }
modload.LoadBuildList(ctx) mods := modload.ListModules(ctx, args, *listU, *listVersions, *listRetracted)
mods := modload.ListModules(ctx, args, *listU, *listVersions)
if !*listE { if !*listE {
for _, m := range mods { for _, m := range mods {
if m.Error != nil { if m.Error != nil {
@ -522,7 +543,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
// Note that -deps is applied after -test, // Note that -deps is applied after -test,
// so that you only get descriptions of tests for the things named // so that you only get descriptions of tests for the things named
// explicitly on the command line, not for all dependencies. // explicitly on the command line, not for all dependencies.
pkgs = load.PackageList(pkgs) pkgs = loadPackageList(pkgs)
} }
// Do we need to run a build to gather information? // Do we need to run a build to gather information?
@ -557,7 +578,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
if *listTest { if *listTest {
all := pkgs all := pkgs
if !*listDeps { if !*listDeps {
all = load.PackageList(pkgs) all = loadPackageList(pkgs)
} }
// Update import paths to distinguish the real package p // Update import paths to distinguish the real package p
// from p recompiled for q.test. // from p recompiled for q.test.
@ -607,6 +628,55 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
} }
} }
// TODO(golang.org/issue/40676): This mechanism could be extended to support
// -u without -m.
if *listRetracted {
// Load retractions for modules that provide packages that will be printed.
// TODO(golang.org/issue/40775): Packages from the same module refer to
// distinct ModulePublic instance. It would be nice if they could all point
// to the same instance. This would require additional global state in
// modload.loaded, so that should be refactored first. For now, we update
// all instances.
modToArg := make(map[*modinfo.ModulePublic]string)
argToMods := make(map[string][]*modinfo.ModulePublic)
var args []string
addModule := func(mod *modinfo.ModulePublic) {
if mod.Version == "" {
return
}
arg := fmt.Sprintf("%s@%s", mod.Path, mod.Version)
if argToMods[arg] == nil {
args = append(args, arg)
}
argToMods[arg] = append(argToMods[arg], mod)
modToArg[mod] = arg
}
for _, p := range pkgs {
if p.Module == nil {
continue
}
addModule(p.Module)
if p.Module.Replace != nil {
addModule(p.Module.Replace)
}
}
if len(args) > 0 {
listU := false
listVersions := false
rmods := modload.ListModules(ctx, args, listU, listVersions, *listRetracted)
for i, arg := range args {
rmod := rmods[i]
for _, mod := range argToMods[arg] {
mod.Retracted = rmod.Retracted
if rmod.Error != nil && mod.Error == nil {
mod.Error = rmod.Error
}
}
}
}
}
// Record non-identity import mappings in p.ImportMap. // Record non-identity import mappings in p.ImportMap.
for _, p := range pkgs { for _, p := range pkgs {
for i, srcPath := range p.Internal.RawImports { for i, srcPath := range p.Internal.RawImports {
@ -625,6 +695,23 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
} }
} }
// loadPackageList is like load.PackageList, but prints error messages and exits
// with nonzero status if listE is not set and any package in the expanded list
// has errors.
func loadPackageList(roots []*load.Package) []*load.Package {
pkgs := load.PackageList(roots)
if !*listE {
for _, pkg := range pkgs {
if pkg.Error != nil {
base.Errorf("%v", pkg.Error)
}
}
}
return pkgs
}
// TrackingWriter tracks the last byte written on every write so // TrackingWriter tracks the last byte written on every write so
// we can avoid printing a newline if one was already written or // we can avoid printing a newline if one was already written or
// if there is no output at all. // if there is no output at all.

@ -191,6 +191,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p
GoFiles: p.XTestGoFiles, GoFiles: p.XTestGoFiles,
Imports: p.XTestImports, Imports: p.XTestImports,
ForTest: p.ImportPath, ForTest: p.ImportPath,
Module: p.Module,
Error: pxtestErr, Error: pxtestErr,
}, },
Internal: PackageInternal{ Internal: PackageInternal{
@ -222,6 +223,7 @@ func TestPackagesAndErrors(ctx context.Context, p *Package, cover *TestCover) (p
ImportPath: p.ImportPath + ".test", ImportPath: p.ImportPath + ".test",
Root: p.Root, Root: p.Root,
Imports: str.StringList(TestMainDeps), Imports: str.StringList(TestMainDeps),
Module: p.Module,
}, },
Internal: PackageInternal{ Internal: PackageInternal{
Build: &build.Package{Name: "main"}, Build: &build.Package{Name: "main"},

@ -12,9 +12,8 @@ import (
"cmd/go/internal/base" "cmd/go/internal/base"
"cmd/go/internal/cfg" "cmd/go/internal/cfg"
"cmd/go/internal/modload"
"cmd/go/internal/modfetch" "cmd/go/internal/modfetch"
"cmd/go/internal/work" "cmd/go/internal/modload"
"golang.org/x/mod/module" "golang.org/x/mod/module"
) )
@ -64,7 +63,7 @@ func init() {
// TODO(jayconrod): https://golang.org/issue/35849 Apply -x to other 'go mod' commands. // TODO(jayconrod): https://golang.org/issue/35849 Apply -x to other 'go mod' commands.
cmdDownload.Flag.BoolVar(&cfg.BuildX, "x", false, "") cmdDownload.Flag.BoolVar(&cfg.BuildX, "x", false, "")
work.AddModCommonFlags(cmdDownload) base.AddModCommonFlags(&cmdDownload.Flag)
} }
type moduleJSON struct { type moduleJSON struct {
@ -136,9 +135,10 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
var mods []*moduleJSON var mods []*moduleJSON
listU := false listU := false
listVersions := false listVersions := false
listRetractions := false
type token struct{} type token struct{}
sem := make(chan token, runtime.GOMAXPROCS(0)) sem := make(chan token, runtime.GOMAXPROCS(0))
for _, info := range modload.ListModules(ctx, args, listU, listVersions) { for _, info := range modload.ListModules(ctx, args, listU, listVersions, listRetractions) {
if info.Replace != nil { if info.Replace != nil {
info = info.Replace info = info.Replace
} }
@ -187,4 +187,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
} }
base.ExitIfErrors() base.ExitIfErrors()
} }
// Update go.mod and especially go.sum if needed.
modload.WriteGoMod()
} }

@ -19,7 +19,6 @@ import (
"cmd/go/internal/lockedfile" "cmd/go/internal/lockedfile"
"cmd/go/internal/modfetch" "cmd/go/internal/modfetch"
"cmd/go/internal/modload" "cmd/go/internal/modload"
"cmd/go/internal/work"
"golang.org/x/mod/modfile" "golang.org/x/mod/modfile"
"golang.org/x/mod/module" "golang.org/x/mod/module"
@ -68,9 +67,14 @@ The -dropreplace=old[@v] flag drops a replacement of the given
module path and version pair. If the @v is omitted, a replacement without module path and version pair. If the @v is omitted, a replacement without
a version on the left side is dropped. a version on the left side is dropped.
The -retract=version and -dropretract=version flags add and drop a
retraction on the given version. The version may be a single version
like "v1.2.3" or a closed interval like "[v1.1.0-v1.1.9]". Note that
-retract=version is a no-op if that retraction already exists.
The -require, -droprequire, -exclude, -dropexclude, -replace, The -require, -droprequire, -exclude, -dropexclude, -replace,
and -dropreplace editing flags may be repeated, and the changes -dropreplace, -retract, and -dropretract editing flags may be repeated,
are applied in the order given. and the changes are applied in the order given.
The -go=version flag sets the expected Go language version. The -go=version flag sets the expected Go language version.
@ -104,6 +108,15 @@ writing it back to go.mod. The JSON output corresponds to these Go types:
New Module New Module
} }
type Retract struct {
Low string
High string
Rationale string
}
Retract entries representing a single version (not an interval) will have
the "Low" and "High" fields set to the same value.
Note that this only describes the go.mod file itself, not other modules Note that this only describes the go.mod file itself, not other modules
referred to indirectly. For the full set of modules available to a build, referred to indirectly. For the full set of modules available to a build,
use 'go list -m -json all'. use 'go list -m -json all'.
@ -137,8 +150,10 @@ func init() {
cmdEdit.Flag.Var(flagFunc(flagDropReplace), "dropreplace", "") cmdEdit.Flag.Var(flagFunc(flagDropReplace), "dropreplace", "")
cmdEdit.Flag.Var(flagFunc(flagReplace), "replace", "") cmdEdit.Flag.Var(flagFunc(flagReplace), "replace", "")
cmdEdit.Flag.Var(flagFunc(flagDropExclude), "dropexclude", "") cmdEdit.Flag.Var(flagFunc(flagDropExclude), "dropexclude", "")
cmdEdit.Flag.Var(flagFunc(flagRetract), "retract", "")
cmdEdit.Flag.Var(flagFunc(flagDropRetract), "dropretract", "")
work.AddModCommonFlags(cmdEdit) base.AddModCommonFlags(&cmdEdit.Flag)
base.AddBuildFlagsNX(&cmdEdit.Flag) base.AddBuildFlagsNX(&cmdEdit.Flag)
} }
@ -252,12 +267,7 @@ func parsePathVersion(flag, arg string) (path, version string) {
base.Fatalf("go mod: -%s=%s: invalid path: %v", flag, arg, err) base.Fatalf("go mod: -%s=%s: invalid path: %v", flag, arg, err)
} }
// We don't call modfile.CheckPathVersion, because that insists if !allowedVersionArg(version) {
// on versions being in semver form, but here we want to allow
// versions like "master" or "1234abcdef", which the go command will resolve
// the next time it runs (or during -fix).
// Even so, we need to make sure the version is a valid token.
if modfile.MustQuote(version) {
base.Fatalf("go mod: -%s=%s: invalid version %q", flag, arg, version) base.Fatalf("go mod: -%s=%s: invalid version %q", flag, arg, version)
} }
@ -289,12 +299,48 @@ func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version
return path, version, fmt.Errorf("invalid %s path: %v", adj, err)
}
}
if path != arg && modfile.MustQuote(version) {
if path != arg && !allowedVersionArg(version) {
return path, version, fmt.Errorf("invalid %s version: %q", adj, version)
}
return path, version, nil
}
// parseVersionInterval parses a single version like "v1.2.3" or a closed
// interval like "[v1.2.3,v1.4.5]". Note that a single version has the same
// representation as an interval with equal upper and lower bounds: both
// Low and High are set.
func parseVersionInterval(arg string) (modfile.VersionInterval, error) {
if !strings.HasPrefix(arg, "[") {
if !allowedVersionArg(arg) {
return modfile.VersionInterval{}, fmt.Errorf("invalid version: %q", arg)
}
return modfile.VersionInterval{Low: arg, High: arg}, nil
}
if !strings.HasSuffix(arg, "]") {
return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg)
}
s := arg[1 : len(arg)-1]
i := strings.Index(s, ",")
if i < 0 {
return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg)
}
low := strings.TrimSpace(s[:i])
high := strings.TrimSpace(s[i+1:])
if !allowedVersionArg(low) || !allowedVersionArg(high) {
return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg)
}
return modfile.VersionInterval{Low: low, High: high}, nil
}
// allowedVersionArg returns whether a token may be used as a version in go.mod.
// We don't call modfile.CheckPathVersion, because that insists on versions
// being in semver form, but here we want to allow versions like "master" or
// "1234abcdef", which the go command will resolve the next time it runs (or
// during -fix). Even so, we need to make sure the version is a valid token.
func allowedVersionArg(arg string) bool {
return !modfile.MustQuote(arg)
}
// flagRequire implements the -require flag.
func flagRequire(arg string) {
path, version := parsePathVersion("require", arg)
@ -377,6 +423,32 @@ func flagDropReplace(arg string) {
})
}
// flagRetract implements the -retract flag.
func flagRetract(arg string) {
vi, err := parseVersionInterval(arg)
if err != nil {
base.Fatalf("go mod: -retract=%s: %v", arg, err)
}
edits = append(edits, func(f *modfile.File) {
if err := f.AddRetract(vi, ""); err != nil {
base.Fatalf("go mod: -retract=%s: %v", arg, err)
}
})
}
// flagDropRetract implements the -dropretract flag.
func flagDropRetract(arg string) {
vi, err := parseVersionInterval(arg)
if err != nil {
base.Fatalf("go mod: -dropretract=%s: %v", arg, err)
}
edits = append(edits, func(f *modfile.File) {
if err := f.DropRetract(vi); err != nil {
base.Fatalf("go mod: -dropretract=%s: %v", arg, err)
}
})
}
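The two handlers above follow the file's existing pattern: flag parsing only validates the argument and queues a closure, and the queued edits are applied to the parsed go.mod later, in the order the flags appeared. A stripped-down illustration of that pattern, using a toy file type rather than the real modfile.File:

package main

import "fmt"

// toyFile stands in for the parsed go.mod; it is not the real modfile.File.
type toyFile struct {
	retract []string
}

var edits []func(*toyFile)

// queueRetract mimics flagRetract: validate now, edit later.
func queueRetract(version string) {
	edits = append(edits, func(f *toyFile) {
		f.retract = append(f.retract, version)
	})
}

func main() {
	// Simulates: go mod edit -retract=v1.0.0 -retract=v1.0.1
	queueRetract("v1.0.0")
	queueRetract("v1.0.1")

	var f toyFile
	for _, edit := range edits {
		edit(&f) // applied in the order the flags were given
	}
	fmt.Println(f.retract) // [v1.0.0 v1.0.1]
}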
// fileJSON is the -json output data structure.
type fileJSON struct {
Module module.Version
@ -384,6 +456,7 @@ type fileJSON struct {
Require []requireJSON
Exclude []module.Version
Replace []replaceJSON
Retract []retractJSON
}
type requireJSON struct {
@ -397,6 +470,12 @@ type replaceJSON struct {
New module.Version
}
type retractJSON struct {
Low string `json:",omitempty"`
High string `json:",omitempty"`
Rationale string `json:",omitempty"`
}
// editPrintJSON prints the -json output.
func editPrintJSON(modFile *modfile.File) {
var f fileJSON
@ -415,6 +494,9 @@ func editPrintJSON(modFile *modfile.File) {
for _, r := range modFile.Replace {
f.Replace = append(f.Replace, replaceJSON{r.Old, r.New})
}
for _, r := range modFile.Retract {
f.Retract = append(f.Retract, retractJSON{r.Low, r.High, r.Rationale})
}
data, err := json.MarshalIndent(&f, "", "\t")
if err != nil {
base.Fatalf("go: internal error: %v", err)

@ -15,7 +15,6 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/modload"
"cmd/go/internal/work"
"golang.org/x/mod/module"
)
@ -33,7 +32,7 @@ path@version, except for the main module, which has no @version suffix.
}
func init() {
work.AddModCommonFlags(cmdGraph)
base.AddModCommonFlags(&cmdGraph.Flag)
}
func runGraph(ctx context.Context, cmd *base.Command, args []string) {
@ -48,7 +47,7 @@ func runGraph(ctx context.Context, cmd *base.Command, args []string) {
base.Fatalf("go: cannot find main module; see 'go help modules'")
}
}
modload.LoadBuildList(ctx)
modload.LoadAllModules(ctx)
reqs := modload.MinReqs()
format := func(m module.Version) string {

@ -9,7 +9,6 @@ package modcmd
import (
"cmd/go/internal/base"
"cmd/go/internal/modload"
"cmd/go/internal/work"
"context"
"os"
"strings"
@ -30,7 +29,7 @@ To override this guess, supply the module path as an argument.
}
func init() {
work.AddModCommonFlags(cmdInit)
base.AddModCommonFlags(&cmdInit.Flag)
}
func runInit(ctx context.Context, cmd *base.Command, args []string) {

@ -10,7 +10,6 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/modload"
"cmd/go/internal/work"
"context"
)
@ -32,7 +31,7 @@ to standard error.
func init() {
cmdTidy.Run = runTidy // break init cycle
cmdTidy.Flag.BoolVar(&cfg.BuildV, "v", false, "")
work.AddModCommonFlags(cmdTidy)
base.AddModCommonFlags(&cmdTidy.Flag)
}
func runTidy(ctx context.Context, cmd *base.Command, args []string) {
@ -40,6 +39,18 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) {
base.Fatalf("go mod tidy: no arguments allowed")
}
// Tidy aims to make 'go test' reproducible for any package in 'all', so we
// need to include test dependencies. For modules that specify go 1.15 or
// earlier this is a no-op (because 'all' saturates transitive test
// dependencies).
//
// However, with lazy loading (go 1.16+) 'all' includes only the packages that
// are transitively imported by the main module, not the test dependencies of
// those packages. In order to make 'go test' reproducible for the packages
// that are in 'all' but outside of the main module, we must explicitly
// request that their test dependencies be included.
modload.LoadTests = true
modload.LoadALL(ctx)
modload.TidyBuildList()
modload.TrimGoSum()

@ -19,7 +19,6 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/imports"
"cmd/go/internal/modload"
"cmd/go/internal/work"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
@ -41,7 +40,7 @@ modules and packages to standard error.
func init() {
cmdVendor.Flag.BoolVar(&cfg.BuildV, "v", false, "")
work.AddModCommonFlags(cmdVendor)
base.AddModCommonFlags(&cmdVendor.Flag)
}
func runVendor(ctx context.Context, cmd *base.Command, args []string) {
@ -77,7 +76,7 @@ func runVendor(ctx context.Context, cmd *base.Command, args []string) {
}
var buf bytes.Buffer
for _, m := range modload.BuildList()[1:] {
for _, m := range modload.LoadedModules()[1:] {
if pkgs := modpkgs[m]; len(pkgs) > 0 || isExplicit[m] {
line := moduleLine(m, modload.Replacement(m))
buf.WriteString(line)

@ -17,7 +17,6 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/modfetch"
"cmd/go/internal/modload"
"cmd/go/internal/work"
"golang.org/x/mod/module"
"golang.org/x/mod/sumdb/dirhash"
@ -38,7 +37,7 @@ non-zero status.
}
func init() {
work.AddModCommonFlags(cmdVerify)
base.AddModCommonFlags(&cmdVerify.Flag)
}
func runVerify(ctx context.Context, cmd *base.Command, args []string) {
@ -60,7 +59,7 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) {
sem := make(chan token, runtime.GOMAXPROCS(0))
// Use a slice of result channels, so that the output is deterministic.
mods := modload.LoadBuildList(ctx)[1:]
mods := modload.LoadAllModules(ctx)[1:]
errsChans := make([]<-chan []error, len(mods))
for i, mod := range mods {

@ -11,7 +11,6 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/modload"
"cmd/go/internal/work"
"golang.org/x/mod/module"
)
@ -58,23 +57,26 @@ var (
func init() {
cmdWhy.Run = runWhy // break init cycle
work.AddModCommonFlags(cmdWhy)
base.AddModCommonFlags(&cmdWhy.Flag)
}
func runWhy(ctx context.Context, cmd *base.Command, args []string) {
loadALL := modload.LoadALL
if *whyVendor {
loadALL = modload.LoadVendor
} else {
modload.LoadTests = true
}
if *whyM {
listU := false
listVersions := false
listRetractions := false
for _, arg := range args {
if strings.Contains(arg, "@") {
base.Fatalf("go mod why: module query not allowed")
}
}
mods := modload.ListModules(ctx, args, listU, listVersions)
mods := modload.ListModules(ctx, args, listU, listVersions, listRetractions)
byModule := make(map[module.Version][]string)
for _, path := range loadALL(ctx) {
m := modload.PackageModule(path)

@ -503,6 +503,9 @@ func checkGoMod(path, version string, data []byte) error {
}
// checkModSum checks that the recorded checksum for mod is h.
//
// mod.Version may have the additional suffix "/go.mod" to request the checksum
// for the module's go.mod file only.
func checkModSum(mod module.Version, h string) error {
// We lock goSum when manipulating it,
// but we arrange to release the lock when calling checkSumDB,
@ -579,9 +582,16 @@ func addModSumLocked(mod module.Version, h string) {
// checkSumDB checks the mod, h pair against the Go checksum database.
// It calls base.Fatalf if the hash is to be rejected.
func checkSumDB(mod module.Version, h string) error {
modWithoutSuffix := mod
noun := "module"
if strings.HasSuffix(mod.Version, "/go.mod") {
noun = "go.mod"
modWithoutSuffix.Version = strings.TrimSuffix(mod.Version, "/go.mod")
}
db, lines, err := lookupSumDB(mod)
if err != nil {
return module.VersionError(mod, fmt.Errorf("verifying module: %v", err))
return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: %v", noun, err))
}
have := mod.Path + " " + mod.Version + " " + h
@ -591,7 +601,7 @@ func checkSumDB(mod module.Version, h string) error {
return nil
}
if strings.HasPrefix(line, prefix) {
return module.VersionError(mod, fmt.Errorf("verifying module: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+sumdbMismatch, h, db, line[len(prefix)-len("h1:"):]))
return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+sumdbMismatch, noun, h, db, line[len(prefix)-len("h1:"):]))
}
}
return nil
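For context on the "/go.mod" suffix handled above: go.sum records two hashes per module version, one for the module content and one for its go.mod file alone, e.g. "example.com/m v1.0.0 h1:..." and "example.com/m v1.0.0/go.mod h1:...". A small sketch of the suffix convention follows; the type and helper are illustrative, not the real modfetch code.

package main

import (
	"fmt"
	"strings"
)

type version struct {
	Path    string
	Version string
}

// splitGoModSuffix reports whether v asks for the go.mod-only checksum and
// returns the version with the "/go.mod" suffix stripped, mirroring the
// handling in checkSumDB above.
func splitGoModSuffix(v version) (version, bool) {
	if strings.HasSuffix(v.Version, "/go.mod") {
		v.Version = strings.TrimSuffix(v.Version, "/go.mod")
		return v, true
	}
	return v, false
}

func main() {
	m := version{Path: "example.com/m", Version: "v1.0.0/go.mod"}
	base, goModOnly := splitGoModSuffix(m)
	fmt.Println(base.Path, base.Version, goModOnly) // example.com/m v1.0.0 true
}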

@ -278,7 +278,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
}
modload.LoadTests = *getT
buildList := modload.LoadBuildList(ctx)
buildList := modload.LoadAllModules(ctx)
buildList = buildList[:len(buildList):len(buildList)] // copy on append
versionByPath := make(map[string]string)
for _, m := range buildList {
@ -290,7 +290,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
// what was requested.
modload.DisallowWriteGoMod()
// Allow looking up modules for import paths outside of a module.
// Allow looking up modules for import paths when outside of a module.
// 'go get' is expected to do this, unlike other commands.
modload.AllowMissingModuleImports()
@ -599,7 +599,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
base.ExitIfErrors()
// Stop if no changes have been made to the build list.
buildList = modload.BuildList()
buildList = modload.LoadedModules()
eq := len(buildList) == len(prevBuildList)
for i := 0; eq && i < len(buildList); i++ {
eq = buildList[i] == prevBuildList[i]
@ -617,7 +617,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
// Handle downgrades.
var down []module.Version
for _, m := range modload.BuildList() {
for _, m := range modload.LoadedModules() {
q := byPath[m.Path]
if q != nil && semver.Compare(m.Version, q.m.Version) > 0 {
down = append(down, module.Version{Path: m.Path, Version: q.m.Version})
@ -628,6 +628,10 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
if err != nil {
base.Fatalf("go: %v", err)
}
// TODO(bcmills) What should happen here under lazy loading?
// Downgrading may intentionally violate the lazy-loading invariants.
modload.SetBuildList(buildList)
modload.ReloadBuildList() // note: does not update go.mod
base.ExitIfErrors()
@ -637,7 +641,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
var lostUpgrades []*query
if len(down) > 0 {
versionByPath = make(map[string]string)
for _, m := range modload.BuildList() {
for _, m := range modload.LoadedModules() {
versionByPath[m.Path] = m.Version
}
for _, q := range byPath {
@ -702,6 +706,15 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
// Everything succeeded. Update go.mod.
modload.AllowWriteGoMod()
modload.WriteGoMod()
modload.DisallowWriteGoMod()
// Report warnings if any retracted versions are in the build list.
// This must be done after writing go.mod to avoid spurious '// indirect'
// comments. These functions read and write global state.
// TODO(golang.org/issue/40775): ListModules resets modload.loader, which
// contains information about direct dependencies that WriteGoMod uses.
// Refactor to avoid these kinds of global side effects.
reportRetractions(ctx)
// If -d was specified, we're done after the module work.
// We've already downloaded modules by loading packages above.
@ -804,6 +817,14 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc
base.Fatalf("go get: internal error: prevM may be set if and only if forceModulePath is set")
}
// If vers is a query like "latest", we should ignore retracted and excluded
// versions. If vers refers to a specific version or commit like "v1.0.0"
// or "master", we should only ignore excluded versions.
allowed := modload.CheckAllowed
if modload.IsRevisionQuery(vers) {
allowed = modload.CheckExclusions
}
// If the query must be a module path, try only that module path.
if forceModulePath {
if path == modload.Target.Path {
@ -812,7 +833,7 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc
}
}
info, err := modload.Query(ctx, path, vers, prevM.Version, modload.Allowed)
info, err := modload.Query(ctx, path, vers, prevM.Version, allowed)
if err == nil {
if info.Version != vers && info.Version != prevM.Version {
logOncef("go: %s %s => %s", path, vers, info.Version)
@ -838,7 +859,7 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc
// If it turns out to only exist as a module, we can detect the resulting
// PackageNotInModuleError and avoid a second round-trip through (potentially)
// all of the configured proxies.
results, err := modload.QueryPattern(ctx, path, vers, modload.Allowed)
results, err := modload.QueryPattern(ctx, path, vers, allowed)
if err != nil {
// If the path doesn't contain a wildcard, check whether it was actually a
// module path instead. If so, return that.
@ -864,190 +885,41 @@ func getQuery(ctx context.Context, path, vers string, prevM module.Version, forc
return m, nil
}
// reportRetractions prints warnings if any modules in the build list are
// retracted.
func reportRetractions(ctx context.Context) {
// Query for retractions of modules in the build list.
// Use modload.ListModules, since that provides information in the same format
// as 'go list -m'. Don't query for "all", since that's not allowed outside a
// module.
buildList := modload.LoadedModules()
args := make([]string, 0, len(buildList))
for _, m := range buildList {
if m.Version == "" {
// main module or dummy target module
continue
}
args = append(args, m.Path+"@"+m.Version)
}
listU := false
listVersions := false
listRetractions := true
mods := modload.ListModules(ctx, args, listU, listVersions, listRetractions)
retractPath := ""
for _, mod := range mods {
if len(mod.Retracted) > 0 {
if retractPath == "" {
retractPath = mod.Path
} else {
retractPath = "<module>"
}
rationale := modload.ShortRetractionRationale(mod.Retracted[0])
logOncef("go: warning: %s@%s is retracted: %s", mod.Path, mod.Version, rationale)
}
}
if modload.HasModRoot() && retractPath != "" {
logOncef("go: run 'go get %s@latest' to switch to the latest unretracted version", retractPath)
}
}
// An upgrader adapts an underlying mvs.Reqs to apply an
// upgrade policy to a list of targets and their dependencies.
type upgrader struct {
mvs.Reqs
// cmdline maps a module path to a query made for that module at a
// specific target version. Each query corresponds to a module
// matched by a command line argument.
cmdline map[string]*query
// upgrade is a set of modules providing dependencies of packages
// matched by command line arguments. If -u or -u=patch is set,
// these modules are upgraded accordingly.
upgrade map[string]bool
}
// newUpgrader creates an upgrader. cmdline contains queries made at
// specific versions for modules matched by command line arguments. pkgs
// is the set of packages matched by command line arguments. If -u or -u=patch
// is set, modules providing dependencies of pkgs are upgraded accordingly.
func newUpgrader(cmdline map[string]*query, pkgs map[string]bool) *upgrader {
u := &upgrader{
Reqs: modload.Reqs(),
cmdline: cmdline,
}
if getU != "" {
u.upgrade = make(map[string]bool)
// Traverse package import graph.
// Initialize work queue with root packages.
seen := make(map[string]bool)
var work []string
add := func(path string) {
if !seen[path] {
seen[path] = true
work = append(work, path)
}
}
for pkg := range pkgs {
add(pkg)
}
for len(work) > 0 {
pkg := work[0]
work = work[1:]
m := modload.PackageModule(pkg)
u.upgrade[m.Path] = true
// testImports is empty unless test imports were actually loaded,
// i.e., -t was set or "all" was one of the arguments.
imports, testImports := modload.PackageImports(pkg)
for _, imp := range imports {
add(imp)
}
for _, imp := range testImports {
add(imp)
}
}
}
return u
}
// Required returns the requirement list for m.
// For the main module, we override requirements with the modules named
// on the command line, and we include new requirements. Otherwise,
// we defer to u.Reqs.
func (u *upgrader) Required(m module.Version) ([]module.Version, error) {
rs, err := u.Reqs.Required(m)
if err != nil {
return nil, err
} }
if m != modload.Target {
return rs, nil
}
overridden := make(map[string]bool)
for i, m := range rs {
if q := u.cmdline[m.Path]; q != nil && q.m.Version != "none" {
rs[i] = q.m
overridden[q.m.Path] = true
}
}
for _, q := range u.cmdline {
if !overridden[q.m.Path] && q.m.Path != modload.Target.Path && q.m.Version != "none" {
rs = append(rs, q.m)
}
}
return rs, nil
}
// Upgrade returns the desired upgrade for m.
//
// If m was requested at a specific version on the command line, then
// Upgrade returns that version.
//
// If -u is set and m provides a dependency of a package matched by
// command line arguments, then Upgrade may provide a newer tagged version.
// If m is a tagged version, then Upgrade will return the latest tagged
// version (with the same minor version number if -u=patch).
// If m is a pseudo-version, then Upgrade returns the latest tagged version
// only if that version has a time-stamp newer than m. This special case
// prevents accidental downgrades when already using a pseudo-version
// newer than the latest tagged version.
//
// If none of the above cases apply, then Upgrade returns m.
func (u *upgrader) Upgrade(m module.Version) (module.Version, error) {
// Allow pkg@vers on the command line to override the upgrade choice v.
// If q's version is < m.Version, then we're going to downgrade anyway,
// and it's cleaner to avoid moving back and forth and picking up
// extraneous other newer dependencies.
// If q's version is > m.Version, then we're going to upgrade past
// m.Version anyway, and again it's cleaner to avoid moving back and forth
// picking up extraneous other newer dependencies.
if q := u.cmdline[m.Path]; q != nil {
return q.m, nil
}
if !u.upgrade[m.Path] {
// Not involved in upgrade. Leave alone.
return m, nil
}
// Run query required by upgrade semantics.
// Note that Query "latest" is not the same as using repo.Latest,
// which may return a pseudoversion for the latest commit.
// Query "latest" returns the newest tagged version or the newest
// prerelease version if there are no non-prereleases, or repo.Latest
// if there aren't any tagged versions.
// If we're querying "upgrade" or "patch", Query will compare the current
// version against the chosen version and will return the current version
// if it is newer.
info, err := modload.Query(context.TODO(), m.Path, string(getU), m.Version, modload.Allowed)
if err != nil {
// Report error but return m, to let version selection continue.
// (Reporting the error will fail the command at the next base.ExitIfErrors.)
// Special case: if the error is for m.Version itself and m.Version has a
// replacement, then keep it and don't report the error: the fact that the
// version is invalid is likely the reason it was replaced to begin with.
var vErr *module.InvalidVersionError
if errors.As(err, &vErr) && vErr.Version == m.Version && modload.Replacement(m).Path != "" {
return m, nil
}
// Special case: if the error is "no matching versions" then don't
// even report the error. Because Query does not consider pseudo-versions,
// it may happen that we have a pseudo-version but during -u=patch
// the query v0.0 matches no versions (not even the one we're using).
var noMatch *modload.NoMatchingVersionError
if !errors.As(err, &noMatch) {
base.Errorf("go get: upgrading %s@%s: %v", m.Path, m.Version, err)
}
return m, nil
}
if info.Version != m.Version {
logOncef("go: %s %s => %s", m.Path, getU, info.Version)
}
return module.Version{Path: m.Path, Version: info.Version}, nil
}
// buildListForLostUpgrade returns the build list for the module graph
// rooted at lost. Unlike mvs.BuildList, the target module (lost) is not
// treated specially. The returned build list may contain a newer version
// of lost.
//
// buildListForLostUpgrade is used after a downgrade has removed a module
// requested at a specific version. This helps us understand the requirements
// implied by each downgrade.
func buildListForLostUpgrade(lost module.Version, reqs mvs.Reqs) ([]module.Version, error) {
return mvs.BuildList(lostUpgradeRoot, &lostUpgradeReqs{Reqs: reqs, lost: lost})
}
var lostUpgradeRoot = module.Version{Path: "lost-upgrade-root", Version: ""}
type lostUpgradeReqs struct {
mvs.Reqs
lost module.Version
}
func (r *lostUpgradeReqs) Required(mod module.Version) ([]module.Version, error) {
if mod == lostUpgradeRoot {
return []module.Version{r.lost}, nil
}
return r.Reqs.Required(mod)
} }
var loggedLines sync.Map

@ -0,0 +1,202 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modget
import (
"context"
"errors"
"cmd/go/internal/base"
"cmd/go/internal/modload"
"cmd/go/internal/mvs"
"golang.org/x/mod/module"
)
// An upgrader adapts an underlying mvs.Reqs to apply an
// upgrade policy to a list of targets and their dependencies.
type upgrader struct {
mvs.Reqs
// cmdline maps a module path to a query made for that module at a
// specific target version. Each query corresponds to a module
// matched by a command line argument.
cmdline map[string]*query
// upgrade is a set of modules providing dependencies of packages
// matched by command line arguments. If -u or -u=patch is set,
// these modules are upgraded accordingly.
upgrade map[string]bool
}
// newUpgrader creates an upgrader. cmdline contains queries made at
// specific versions for modules matched by command line arguments. pkgs
// is the set of packages matched by command line arguments. If -u or -u=patch
// is set, modules providing dependencies of pkgs are upgraded accordingly.
func newUpgrader(cmdline map[string]*query, pkgs map[string]bool) *upgrader {
u := &upgrader{
Reqs: modload.Reqs(),
cmdline: cmdline,
}
if getU != "" {
u.upgrade = make(map[string]bool)
// Traverse package import graph.
// Initialize work queue with root packages.
seen := make(map[string]bool)
var work []string
add := func(path string) {
if !seen[path] {
seen[path] = true
work = append(work, path)
}
}
for pkg := range pkgs {
add(pkg)
}
for len(work) > 0 {
pkg := work[0]
work = work[1:]
m := modload.PackageModule(pkg)
u.upgrade[m.Path] = true
// testImports is empty unless test imports were actually loaded,
// i.e., -t was set or "all" was one of the arguments.
imports, testImports := modload.PackageImports(pkg)
for _, imp := range imports {
add(imp)
}
for _, imp := range testImports {
add(imp)
}
}
}
return u
}
// Required returns the requirement list for m.
// For the main module, we override requirements with the modules named
// on the command line, and we include new requirements. Otherwise,
// we defer to u.Reqs.
func (u *upgrader) Required(m module.Version) ([]module.Version, error) {
rs, err := u.Reqs.Required(m)
if err != nil {
return nil, err
}
if m != modload.Target {
return rs, nil
}
overridden := make(map[string]bool)
for i, m := range rs {
if q := u.cmdline[m.Path]; q != nil && q.m.Version != "none" {
rs[i] = q.m
overridden[q.m.Path] = true
}
}
for _, q := range u.cmdline {
if !overridden[q.m.Path] && q.m.Path != modload.Target.Path && q.m.Version != "none" {
rs = append(rs, q.m)
}
}
return rs, nil
}
// Upgrade returns the desired upgrade for m.
//
// If m was requested at a specific version on the command line, then
// Upgrade returns that version.
//
// If -u is set and m provides a dependency of a package matched by
// command line arguments, then Upgrade may provide a newer tagged version.
// If m is a tagged version, then Upgrade will return the latest tagged
// version (with the same minor version number if -u=patch).
// If m is a pseudo-version, then Upgrade returns the latest tagged version
// only if that version has a time-stamp newer than m. This special case
// prevents accidental downgrades when already using a pseudo-version
// newer than the latest tagged version.
//
// If none of the above cases apply, then Upgrade returns m.
func (u *upgrader) Upgrade(m module.Version) (module.Version, error) {
// Allow pkg@vers on the command line to override the upgrade choice v.
// If q's version is < m.Version, then we're going to downgrade anyway,
// and it's cleaner to avoid moving back and forth and picking up
// extraneous other newer dependencies.
// If q's version is > m.Version, then we're going to upgrade past
// m.Version anyway, and again it's cleaner to avoid moving back and forth
// picking up extraneous other newer dependencies.
if q := u.cmdline[m.Path]; q != nil {
return q.m, nil
}
if !u.upgrade[m.Path] {
// Not involved in upgrade. Leave alone.
return m, nil
}
// Run query required by upgrade semantics.
// Note that Query "latest" is not the same as using repo.Latest,
// which may return a pseudoversion for the latest commit.
// Query "latest" returns the newest tagged version or the newest
// prerelease version if there are no non-prereleases, or repo.Latest
// if there aren't any tagged versions.
// If we're querying "upgrade" or "patch", Query will compare the current
// version against the chosen version and will return the current version
// if it is newer.
info, err := modload.Query(context.TODO(), m.Path, string(getU), m.Version, modload.CheckAllowed)
if err != nil {
// Report error but return m, to let version selection continue.
// (Reporting the error will fail the command at the next base.ExitIfErrors.)
// Special case: if the error is for m.Version itself and m.Version has a
// replacement, then keep it and don't report the error: the fact that the
// version is invalid is likely the reason it was replaced to begin with.
var vErr *module.InvalidVersionError
if errors.As(err, &vErr) && vErr.Version == m.Version && modload.Replacement(m).Path != "" {
return m, nil
}
// Special case: if the error is "no matching versions" then don't
// even report the error. Because Query does not consider pseudo-versions,
// it may happen that we have a pseudo-version but during -u=patch
// the query v0.0 matches no versions (not even the one we're using).
var noMatch *modload.NoMatchingVersionError
if !errors.As(err, &noMatch) {
base.Errorf("go get: upgrading %s@%s: %v", m.Path, m.Version, err)
}
return m, nil
}
if info.Version != m.Version {
logOncef("go: %s %s => %s", m.Path, getU, info.Version)
}
return module.Version{Path: m.Path, Version: info.Version}, nil
}
// buildListForLostUpgrade returns the build list for the module graph
// rooted at lost. Unlike mvs.BuildList, the target module (lost) is not
// treated specially. The returned build list may contain a newer version
// of lost.
//
// buildListForLostUpgrade is used after a downgrade has removed a module
// requested at a specific version. This helps us understand the requirements
// implied by each downgrade.
func buildListForLostUpgrade(lost module.Version, reqs mvs.Reqs) ([]module.Version, error) {
return mvs.BuildList(lostUpgradeRoot, &lostUpgradeReqs{Reqs: reqs, lost: lost})
}
var lostUpgradeRoot = module.Version{Path: "lost-upgrade-root", Version: ""}
type lostUpgradeReqs struct {
mvs.Reqs
lost module.Version
}
func (r *lostUpgradeReqs) Required(mod module.Version) ([]module.Version, error) {
if mod == lostUpgradeRoot {
return []module.Version{r.lost}, nil
}
return r.Reqs.Required(mod)
}
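To illustrate the comment on buildListForLostUpgrade: with a synthetic root that requires only the lost module, minimal version selection can still land on a newer version of that module if something else in its requirement graph asks for one. The following is a self-contained toy with made-up module paths and a simplified selection loop, not the real mvs package.

package main

import (
	"fmt"
	"sort"

	"golang.org/x/mod/semver"
)

type mod struct{ path, version string }

// buildList walks every module version reachable from root and keeps the
// maximum requested version of each path.
func buildList(root mod, reqs map[mod][]mod) []mod {
	seen := map[mod]bool{root: true}
	queue := []mod{root}
	for len(queue) > 0 {
		m := queue[0]
		queue = queue[1:]
		for _, dep := range reqs[m] {
			if !seen[dep] {
				seen[dep] = true
				queue = append(queue, dep)
			}
		}
	}
	max := map[string]string{}
	for m := range seen {
		if v, ok := max[m.path]; !ok || semver.Compare(m.version, v) > 0 {
			max[m.path] = m.version
		}
	}
	list := make([]mod, 0, len(max))
	for p, v := range max {
		list = append(list, mod{p, v})
	}
	sort.Slice(list, func(i, j int) bool { return list[i].path < list[j].path })
	return list
}

func main() {
	lost := mod{"example.com/b", "v1.2.0"}
	root := mod{"lost-upgrade-root", ""}
	reqs := map[mod][]mod{
		root: {lost},
		lost: {{"example.com/c", "v1.5.0"}},
		{"example.com/c", "v1.5.0"}: {{"example.com/b", "v1.3.0"}}, // pulls b past the lost version
	}
	fmt.Println(buildList(root, reqs)) // includes example.com/b at v1.3.0, newer than "lost"
}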
