[dev.boringcrypto] all: merge master into dev.boringcrypto

Change-Id: Ia661c871e14445672b7d36a443455302e47cc2a1
Filippo Valsorda 2018-10-25 19:30:18 -04:00
commit 13bf5b80e8
534 changed files with 23488 additions and 5204 deletions

View file

@@ -6,6 +6,8 @@ pkg os, const ModeType = 2399141888
pkg os, const ModeType = 2399666176
pkg os (linux-arm), const O_SYNC = 4096
pkg os (linux-arm-cgo), const O_SYNC = 4096
+pkg os (linux-arm), const O_SYNC = 1052672
+pkg os (linux-arm-cgo), const O_SYNC = 1052672
pkg syscall (darwin-386), const ImplementsGetwd = false
pkg syscall (darwin-386), func Fchflags(string, int) error
pkg syscall (darwin-386-cgo), const ImplementsGetwd = false
@@ -381,3 +383,101 @@ pkg syscall (windows-amd64), type CertRevocationInfo struct, CrlInfo uintptr
pkg syscall (windows-amd64), type CertRevocationInfo struct, OidSpecificInfo uintptr
pkg syscall (windows-amd64), type CertSimpleChain struct, TrustListInfo uintptr
pkg syscall (windows-amd64), type RawSockaddrAny struct, Pad [96]int8
pkg syscall (freebsd-386), func Mknod(string, uint32, int) error
pkg syscall (freebsd-386), type Dirent struct, Fileno uint32
pkg syscall (freebsd-386), type Dirent struct, Namlen uint8
pkg syscall (freebsd-386), type Stat_t struct, Atimespec Timespec
pkg syscall (freebsd-386), type Stat_t struct, Birthtimespec Timespec
pkg syscall (freebsd-386), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-386), type Stat_t struct, Ctimespec Timespec
pkg syscall (freebsd-386), type Stat_t struct, Dev uint32
pkg syscall (freebsd-386), type Stat_t struct, Gen uint32
pkg syscall (freebsd-386), type Stat_t struct, Ino uint32
pkg syscall (freebsd-386), type Stat_t struct, Lspare int32
pkg syscall (freebsd-386), type Stat_t struct, Mtimespec Timespec
pkg syscall (freebsd-386), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-386), type Stat_t struct, Pad_cgo_0 [8]uint8
pkg syscall (freebsd-386), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-386), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-386), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-386-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-386-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-386-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-386-cgo), type Stat_t struct, Atimespec Timespec
pkg syscall (freebsd-386-cgo), type Stat_t struct, Birthtimespec Timespec
pkg syscall (freebsd-386-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Ctimespec Timespec
pkg syscall (freebsd-386-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-386-cgo), type Stat_t struct, Mtimespec Timespec
pkg syscall (freebsd-386-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-386-cgo), type Stat_t struct, Pad_cgo_0 [8]uint8
pkg syscall (freebsd-386-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-amd64), func Mknod(string, uint32, int) error
pkg syscall (freebsd-amd64), type Dirent struct, Fileno uint32
pkg syscall (freebsd-amd64), type Dirent struct, Namlen uint8
pkg syscall (freebsd-amd64), type Stat_t struct, Atimespec Timespec
pkg syscall (freebsd-amd64), type Stat_t struct, Birthtimespec Timespec
pkg syscall (freebsd-amd64), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Ctimespec Timespec
pkg syscall (freebsd-amd64), type Stat_t struct, Dev uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Gen uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Ino uint32
pkg syscall (freebsd-amd64), type Stat_t struct, Lspare int32
pkg syscall (freebsd-amd64), type Stat_t struct, Mtimespec Timespec
pkg syscall (freebsd-amd64), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-amd64), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-amd64), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-amd64), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-amd64-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-amd64-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-amd64-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Atimespec Timespec
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Birthtimespec Timespec
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Ctimespec Timespec
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Mtimespec Timespec
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-arm), func Mknod(string, uint32, int) error
pkg syscall (freebsd-arm), type Dirent struct, Fileno uint32
pkg syscall (freebsd-arm), type Dirent struct, Namlen uint8
pkg syscall (freebsd-arm), type Stat_t struct, Atimespec Timespec
pkg syscall (freebsd-arm), type Stat_t struct, Birthtimespec Timespec
pkg syscall (freebsd-arm), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-arm), type Stat_t struct, Ctimespec Timespec
pkg syscall (freebsd-arm), type Stat_t struct, Dev uint32
pkg syscall (freebsd-arm), type Stat_t struct, Gen uint32
pkg syscall (freebsd-arm), type Stat_t struct, Ino uint32
pkg syscall (freebsd-arm), type Stat_t struct, Lspare int32
pkg syscall (freebsd-arm), type Stat_t struct, Mtimespec Timespec
pkg syscall (freebsd-arm), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-arm), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-arm), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-arm), type Statfs_t struct, Mntonname [88]int8
pkg syscall (freebsd-arm-cgo), func Mknod(string, uint32, int) error
pkg syscall (freebsd-arm-cgo), type Dirent struct, Fileno uint32
pkg syscall (freebsd-arm-cgo), type Dirent struct, Namlen uint8
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Atimespec Timespec
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Birthtimespec Timespec
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Blksize uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Ctimespec Timespec
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Dev uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Gen uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Ino uint32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Lspare int32
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Mtimespec Timespec
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Nlink uint16
pkg syscall (freebsd-arm-cgo), type Stat_t struct, Rdev uint32
pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntfromname [88]int8
pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntonname [88]int8

View file

@@ -50,10 +50,10 @@ learned. You can {{if not $.GoogleCN}}<a href="//tour.golang.org/">take the tour
online</a> or{{end}} install it locally with:
</p>
<pre>
-$ go get golang.org/x/tour/gotour
+$ go get golang.org/x/tour
</pre>
<p>
-This will place the <code>gotour</code> binary in your workspace's <code>bin</code> directory.
+This will place the <code>tour</code> binary in your workspace's <code>bin</code> directory.
</p>
<h3 id="code"><a href="code.html">How to write Go code</a></h3>

View file

@@ -28,7 +28,7 @@ or as a plugin for IntelliJ IDEA Ultimate</li>
</ul>
<p>
-Note that these are only a few top solutions; a more comphensive
+Note that these are only a few top solutions; a more comprehensive
community-maintained list of
<a href="https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins">IDEs and text editor plugins</a>
is available at the Wiki.

View file

@@ -1402,11 +1402,11 @@ the moment, the following snippet would also read the first 32 bytes of the buffer.
    var err error
    for i := 0; i &lt; 32; i++ {
        nbytes, e := f.Read(buf[i:i+1]) // Read one byte.
+       n += nbytes
        if nbytes == 0 || e != nil {
            err = e
            break
        }
-       n += nbytes
    }
</pre>
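For reference, a self-contained version of the corrected loop above; the count is accumulated before the error check so bytes from a final partial read are not dropped. The input file name and surrounding error handling are illustrative additions, not part of the original snippet.

package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("somefile") // hypothetical input
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	var buf [32]byte
	n := 0
	var readErr error
	for i := 0; i < 32; i++ {
		nbytes, e := f.Read(buf[i : i+1]) // Read one byte.
		n += nbytes                       // count the byte before checking for errors
		if nbytes == 0 || e != nil {
			readErr = e
			break
		}
	}
	fmt.Printf("read %d bytes (err = %v)\n", n, readErr)
}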
<p>
@@ -2762,7 +2762,7 @@ type Job struct {
}
</pre>
<p>
-The <code>Job</code> type now has the <code>Log</code>, <code>Logf</code>
+The <code>Job</code> type now has the <code>Print</code>, <code>Printf</code>, <code>Println</code>
and other
methods of <code>*log.Logger</code>. We could have given the <code>Logger</code>
a field name, of course, but it's not necessary to do so. And now, once
@@ -2770,7 +2770,7 @@ initialized, we can
log to the <code>Job</code>:
</p>
<pre>
-job.Log("starting now...")
+job.Println("starting now...")
</pre>
<p>
The <code>Logger</code> is a regular field of the <code>Job</code> struct,
@@ -2797,8 +2797,8 @@ we would write <code>job.Logger</code>,
which would be useful if we wanted to refine the methods of <code>Logger</code>.
</p>
<pre>
-func (job *Job) Logf(format string, args ...interface{}) {
-    job.Logger.Logf("%q: %s", job.Command, fmt.Sprintf(format, args...))
+func (job *Job) Printf(format string, args ...interface{}) {
+    job.Logger.Printf("%q: %s", job.Command, fmt.Sprintf(format, args...))
}
</pre>
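A self-contained sketch of the corrected embedding example; the Command value and logger configuration are illustrative.

package main

import (
	"fmt"
	"log"
	"os"
)

type Job struct {
	Command string
	*log.Logger // embedded: Job gains Print, Printf, Println, ...
}

// Printf refines the promoted method, prefixing each message with the command.
func (job *Job) Printf(format string, args ...interface{}) {
	job.Logger.Printf("%q: %s", job.Command, fmt.Sprintf(format, args...))
}

func main() {
	job := &Job{"makedb", log.New(os.Stderr, "Job: ", log.Ldate)}
	job.Println("starting now...") // promoted from the embedded *log.Logger
	job.Printf("%d records pending", 7)
}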
<p>

View file

@@ -1,6 +1,6 @@
<!--{
	"Title": "The Go Programming Language Specification",
-	"Subtitle": "Version of September 24, 2018",
+	"Subtitle": "Version of October 23, 2018",
	"Path": "/ref/spec"
}-->
@@ -811,7 +811,7 @@ To avoid portability issues all numeric types are <a href="#Type_definitions">defined
types</a> and thus distinct except
<code>byte</code>, which is an <a href="#Alias_declarations">alias</a> for <code>uint8</code>, and
<code>rune</code>, which is an alias for <code>int32</code>.
-Conversions
+Explicit conversions
are required when different numeric types are mixed in an expression
or assignment. For instance, <code>int32</code> and <code>int</code>
are not the same type even though they may have the same size on a
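A minimal illustration of the rule being reworded here: mixing distinct numeric types requires an explicit conversion (variable names are illustrative).

package main

import "fmt"

func main() {
	var i int = 35
	var j int32 = 7
	// sum := i + j   // does not compile: int and int32 are distinct types
	sum := i + int(j) // explicit conversion required
	fmt.Println(sum)
}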
@@ -1348,7 +1348,7 @@ ChannelType = ( "chan" | "chan" "&lt;-" | "&lt;-" "chan" ) ElementType .
The optional <code>&lt;-</code> operator specifies the channel <i>direction</i>,
<i>send</i> or <i>receive</i>. If no direction is given, the channel is
<i>bidirectional</i>.
-A channel may be constrained only to send or only to receive by
+A channel may be constrained only to send or only to receive by explicit
<a href="#Conversions">conversion</a> or <a href="#Assignments">assignment</a>.
</p>
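A small sketch of constraining a channel's direction, by assignment and by explicit conversion.

package main

import "fmt"

func main() {
	ch := make(chan int, 1) // bidirectional

	var send chan<- int = ch // constrained by assignment
	recv := (<-chan int)(ch) // constrained by explicit conversion

	send <- 42
	fmt.Println(<-recv)
}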
@@ -2069,9 +2069,9 @@ Otherwise, each variable is initialized to its <a href="#The_zero_value">zero value</a>.
If a type is present, each variable is given that type.
Otherwise, each variable is given the type of the corresponding
initialization value in the assignment.
-If that value is an untyped constant, it is first
+If that value is an untyped constant, it is first implicitly
<a href="#Conversions">converted</a> to its <a href="#Constants">default type</a>;
-if it is an untyped boolean value, it is first converted to type <code>bool</code>.
+if it is an untyped boolean value, it is first implicitly converted to type <code>bool</code>.
The predeclared value <code>nil</code> cannot be used to initialize a variable
with no explicit type.
</p>
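An illustration of the implicit conversions described above for variable declarations without a type.

package main

import "fmt"

func main() {
	var f = 1.5   // untyped float constant: default type float64
	var n = 42    // untyped integer constant: default type int
	var b = 1 < 2 // untyped boolean value: converted to bool
	fmt.Printf("%T %T %T\n", f, n, b) // float64 int bool
}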
@@ -2202,11 +2202,11 @@ Receiver = Parameters .
<p>
The receiver is specified via an extra parameter section preceding the method
name. That parameter section must declare a single non-variadic parameter, the receiver.
-Its type must be of the form <code>T</code> or <code>*T</code> (possibly using
-parentheses) where <code>T</code> is a type name. The type denoted by <code>T</code> is called
-the receiver <i>base type</i>; it must not be a pointer or interface type and
-it must be <a href="#Type_definitions">defined</a> in the same package as the method.
-The method is said to be <i>bound</i> to the base type and the method name
+Its type must be a <a href="#Type_definitions">defined</a> type <code>T</code> or a
+pointer to a defined type <code>T</code>. <code>T</code> is called the receiver
+<i>base type</i>. A receiver base type cannot be a pointer or interface type and
+it must be defined in the same package as the method.
+The method is said to be <i>bound</i> to its receiver base type and the method name
is visible only within <a href="#Selectors">selectors</a> for type <code>T</code>
or <code>*T</code>.
</p>
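A short example of a defined receiver base type with value and pointer receivers.

package main

import (
	"fmt"
	"math"
)

type Point struct{ X, Y float64 } // defined type: a valid receiver base type

func (p Point) Abs() float64 { return math.Sqrt(p.X*p.X + p.Y*p.Y) }

func (p *Point) Scale(f float64) {
	p.X *= f
	p.Y *= f
}

// A pointer type cannot be a receiver base type:
//	type P *Point
//	func (p P) M() {} // invalid: P is a pointer type

func main() {
	pt := Point{3, 4}
	pt.Scale(2)
	fmt.Println(pt.Abs()) // 10
}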
@@ -2226,7 +2226,7 @@ the non-blank method and field names must be distinct.
</p>
<p>
-Given type <code>Point</code>, the declarations
+Given defined type <code>Point</code>, the declarations
</p>
<pre>
@@ -3260,7 +3260,7 @@ var v, ok T1 = x.(T)
yields an additional untyped boolean value. The value of <code>ok</code> is <code>true</code>
if the assertion holds. Otherwise it is <code>false</code> and the value of <code>v</code> is
the <a href="#The_zero_value">zero value</a> for type <code>T</code>.
-No run-time panic occurs in this case.
+No <a href="#Run_time_panics">run-time panic</a> occurs in this case.
</p>
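The comma-ok form in a runnable sketch: a failed assertion yields the zero value and false rather than a run-time panic.

package main

import "fmt"

func main() {
	var x interface{} = "hello"

	s, ok := x.(string)
	fmt.Println(s, ok) // hello true

	n, ok := x.(int)
	fmt.Println(n, ok) // 0 false: zero value, no panic

	// _ = x.(int) // the single-result form would panic at run time
}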
@@ -3433,7 +3433,7 @@ For operations involving constants only, see the section on
<p>
Except for shift operations, if one operand is an untyped <a href="#Constants">constant</a>
-and the other operand is not, the constant is <a href="#Conversions">converted</a>
+and the other operand is not, the constant is implicitly <a href="#Conversions">converted</a>
to the type of the other operand.
</p>
@@ -3442,7 +3442,7 @@ The right operand in a shift expression must have unsigned integer type
or be an untyped constant <a href="#Representability">representable</a> by a
value of type <code>uint</code>.
If the left operand of a non-constant shift expression is an untyped constant,
-it is first converted to the type it would assume if the shift expression were
+it is first implicitly converted to the type it would assume if the shift expression were
replaced by its left operand alone.
</p>
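An example of the shift rule: the untyped constant left operand takes the type it would have if the shift were replaced by the operand alone.

package main

import "fmt"

func main() {
	var s uint = 3
	x := 1 << s          // 1 is treated as int, so x is int
	var y int64 = 1 << s // here 1 is implicitly converted to int64
	// var f float64 = 1 << s // invalid: 1 would become float64, which cannot be shifted
	fmt.Printf("%T %T %v %v\n", x, y, x, y) // int int64 8 8
}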
@@ -3624,7 +3624,7 @@ For signed integers, the operations <code>+</code>,
<code>-</code>, <code>*</code>, <code>/</code>, and <code>&lt;&lt;</code> may legally
overflow and the resulting value exists and is deterministically defined
by the signed integer representation, the operation, and its operands.
-No exception is raised as a result of overflow.
+Overflow does not cause a <a href="#Run_time_panics">run-time panic</a>.
A compiler may not optimize code under the assumption that overflow does
not occur. For instance, it may not assume that <code>x &lt; x + 1</code> is always true.
</p>
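A small demonstration: signed overflow wraps deterministically and does not panic.

package main

import (
	"fmt"
	"math"
)

func main() {
	x := int8(math.MaxInt8) // 127
	x++                     // wraps: no run-time panic
	fmt.Println(x)          // -128

	// The compiler may not assume x < x+1 for signed integers.
}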
@@ -3645,7 +3645,7 @@ occurs is implementation-specific.
An implementation may combine multiple floating-point operations into a single
fused operation, possibly across statements, and produce a result that differs
from the value obtained by executing and rounding the instructions individually.
-A floating-point type <a href="#Conversions">conversion</a> explicitly rounds to
+An explicit floating-point type <a href="#Conversions">conversion</a> rounds to
the precision of the target type, preventing fusion that would discard that rounding.
</p>
@@ -3907,7 +3907,14 @@ channel is closed and empty.
<h3 id="Conversions">Conversions</h3>
<p>
-Conversions are expressions of the form <code>T(x)</code>
+A conversion changes the <a href="#Types">type</a> of an expression
+to the type specified by the conversion.
+A conversion may appear literally in the source, or it may be <i>implied</i>
+by the context in which an expression appears.
+</p>
+<p>
+An <i>explicit</i> conversion is an expression of the form <code>T(x)</code>
where <code>T</code> is a type and <code>x</code> is an expression
that can be converted to type <code>T</code>.
</p>
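Both kinds of conversion side by side: explicit T(x) forms and a conversion implied by context.

package main

import "fmt"

func main() {
	x := 65
	f := float64(x) // explicit conversion
	r := rune(x)    // explicit conversion
	var u uint8 = 7 // implied: untyped constant 7 is converted to uint8
	fmt.Println(f, string(r), u) // 65 A 7
}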
@@ -3938,7 +3945,7 @@ func() int(x) // x is converted to func() int (unambiguous)
A <a href="#Constants">constant</a> value <code>x</code> can be converted to
type <code>T</code> if <code>x</code> is <a href="#Representability">representable</a>
by a value of <code>T</code>.
-As a special case, an integer constant <code>x</code> can be converted to a
+As a special case, an integer constant <code>x</code> can be explicitly converted to a
<a href="#String_types">string type</a> using the
<a href="#Conversions_to_and_from_a_string_type">same rule</a>
as for non-constant <code>x</code>.
@@ -4672,13 +4679,13 @@ to the type of the operand to which it is assigned, with the following special cases:
<li>
If an untyped constant
is assigned to a variable of interface type or the blank identifier,
-the constant is first <a href="#Conversions">converted</a> to its
+the constant is first implicitly <a href="#Conversions">converted</a> to its
<a href="#Constants">default type</a>.
</li>
<li>
If an untyped boolean value is assigned to a variable of interface type or
-the blank identifier, it is first converted to type <code>bool</code>.
+the blank identifier, it is first implicitly converted to type <code>bool</code>.
</li>
</ol>
@@ -4764,14 +4771,14 @@ ExprSwitchCase = "case" ExpressionList | "default" .
</pre>
<p>
-If the switch expression evaluates to an untyped constant, it is first
+If the switch expression evaluates to an untyped constant, it is first implicitly
<a href="#Conversions">converted</a> to its <a href="#Constants">default type</a>;
-if it is an untyped boolean value, it is first converted to type <code>bool</code>.
+if it is an untyped boolean value, it is first implicitly converted to type <code>bool</code>.
The predeclared untyped value <code>nil</code> cannot be used as a switch expression.
</p>
<p>
-If a case expression is untyped, it is first <a href="#Conversions">converted</a>
+If a case expression is untyped, it is first implicitly <a href="#Conversions">converted</a>
to the type of the switch expression.
For each (possibly converted) case expression <code>x</code> and the value <code>t</code>
of the switch expression, <code>x == t</code> must be a valid <a href="#Comparison_operators">comparison</a>.
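A sketch of the implicit conversions in switch statements described above.

package main

import "fmt"

func main() {
	var c byte = 2
	switch c {
	case 1, 2, 3: // untyped case expressions are implicitly converted to byte
		fmt.Println("small")
	default:
		fmt.Println("big")
	}

	switch 1 + 2 { // untyped constant switch expression: converted to its default type, int
	case 3:
		fmt.Println("three")
	}
}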
@@ -5881,7 +5888,7 @@ floating-point type and the return type is the complex type
with the corresponding floating-point constituents:
<code>complex64</code> for <code>float32</code> arguments, and
<code>complex128</code> for <code>float64</code> arguments.
-If one of the arguments evaluates to an untyped constant, it is first
+If one of the arguments evaluates to an untyped constant, it is first implicitly
<a href="#Conversions">converted</a> to the type of the other argument.
If both arguments evaluate to untyped constants, they must be non-complex
numbers or their imaginary parts must be zero, and the return value of
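An example of the complex builtin's argument conversion.

package main

import "fmt"

func main() {
	var re float32 = 1
	c1 := complex(re, 0.5) // 0.5 is implicitly converted to float32; c1 is complex64
	c2 := complex(1, 2)    // both untyped: the result defaults to complex128
	fmt.Printf("%T %T\n", c1, c2) // complex64 complex128
}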

View file

@@ -27,6 +27,11 @@ The <a href="https://forum.golangbridge.org/">Go Forum</a> is a discussion
forum for Go programmers.
</p>
+<h3 id="discord"><a href="https://discord.gg/64C346U">Gophers Discord</a></h3>
+<p>
+Get live support and talk with other gophers on the Go Discord.
+</p>
<h3 id="slack"><a href="https://blog.gopheracademy.com/gophers-slack-community/">Gopher Slack</a></h3>
<p>Get live support from other users in the Go slack channel.</p>

View file

@@ -295,7 +295,7 @@ func goWithString(s string) {
}
func testCallbackStack(t *testing.T) {
-	// Make cgo call and callback with different amount of stack stack available.
+	// Make cgo call and callback with different amount of stack available.
	// We do not do any explicit checks, just ensure that it does not crash.
	for _, f := range splitTests {
		f()

View file

@@ -9,7 +9,7 @@ import "C"
func FuncInt() int { return 1 }
-// Add a recursive type to to check that type equality across plugins doesn't
+// Add a recursive type to check that type equality across plugins doesn't
// crash. See https://golang.org/issues/19258
func FuncRecursive() X { return X{} }

View file

@@ -374,7 +374,7 @@ func (c *config) checkRuntime() (skip bool, err error) {
}
// libcgo.h sets CGO_TSAN if it detects TSAN support in the C compiler.
-// Dump the preprocessor defines to check that that works.
+// Dump the preprocessor defines to check that works.
// (Sometimes it doesn't: see https://golang.org/issue/15983.)
cmd, err := cc(c.cFlags...)
if err != nil {

View file

@@ -578,7 +578,7 @@ func TestNotes(t *testing.T) {
}
// Build a GOPATH package (depBase) into a shared library that links against the goroot
-// runtime, another package (dep2) that links against the first, and and an
+// runtime, another package (dep2) that links against the first, and an
// executable that links against dep2.
func TestTwoGopathShlibs(t *testing.T) {
	goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase")

View file

@@ -26,7 +26,7 @@ scheme.
# Download NaCl
Download nacl_sdk.zip file from
-https://developers.google.com/native-client/dev/sdk/download
+https://developer.chrome.com/native-client/sdk/download
and unpack it. I chose /opt/nacl_sdk.
# Update
@@ -37,7 +37,7 @@ sdk. These are released every 6-8 weeks, in line with Chrome releases.
% cd /opt/nacl_sdk
% ./naclsdk update
-At this time pepper_40 is the stable version. The NaCl port needs at least pepper_39
+At this time pepper_49 is the stable version. The NaCl port needs at least pepper_39
to work. If naclsdk downloads a later version, please adjust accordingly.
The cmd/go helper scripts expect that the loaders sel_ldr_{x86_{32,64},arm} and

View file

@@ -37,6 +37,9 @@ go src=..
buildid
testdata
+
xcoff
testdata
+
gofmt
gofmt.go
gofmt_test.go
@@ -151,6 +154,9 @@ go src=..
trace
testdata
+
traceparser
testdata
+
io
+
mime

View file

@@ -47,10 +47,20 @@
			}
			return buf.length;
		},
-		openSync(path, flags, mode) {
+		write(fd, buf, offset, length, position, callback) {
+			if (offset !== 0 || length !== buf.length || position !== null) {
+				throw new Error("not implemented");
+			}
+			const n = this.writeSync(fd, buf);
+			callback(null, n);
+		},
+		open(path, flags, mode, callback) {
			const err = new Error("not implemented");
			err.code = "ENOSYS";
-			throw err;
+			callback(err);
+		},
+		fsync(fd, callback) {
+			callback(null);
		},
	};
}
@@ -88,6 +98,9 @@
		const loadValue = (addr) => {
			const f = mem().getFloat64(addr, true);
+			if (f === 0) {
+				return undefined;
+			}
			if (!isNaN(f)) {
				return f;
			}
@@ -105,14 +118,18 @@
				mem().setUint32(addr, 0, true);
				return;
			}
+			if (v === 0) {
+				mem().setUint32(addr + 4, nanHead, true);
+				mem().setUint32(addr, 1, true);
+				return;
+			}
			mem().setFloat64(addr, v, true);
			return;
		}
		switch (v) {
			case undefined:
-				mem().setUint32(addr + 4, nanHead, true);
-				mem().setUint32(addr, 1, true);
+				mem().setFloat64(addr, 0, true);
				return;
			case null:
				mem().setUint32(addr + 4, nanHead, true);
@@ -327,7 +344,7 @@
		this._inst = instance;
		this._values = [ // TODO: garbage collection
			NaN,
-			undefined,
+			0,
			null,
			true,
			false,
@@ -389,14 +406,14 @@
		}
		static _makeCallbackHelper(id, pendingCallbacks, go) {
-			return function() {
+			return function () {
				pendingCallbacks.push({ id: id, args: arguments });
				go._resolveCallbackPromise();
			};
		}
		static _makeEventCallbackHelper(preventDefault, stopPropagation, stopImmediatePropagation, fn) {
-			return function(event) {
+			return function (event) {
				if (preventDefault) {
					event.preventDefault();
				}

View file

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build linux dragonfly openbsd solaris
+// +build linux dragonfly freebsd openbsd solaris
package tar

View file

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin freebsd netbsd
+// +build darwin netbsd
package tar

View file

@@ -178,7 +178,7 @@ func (w *Writer) Close() error {
		return err
	}
-	// store max values in the regular end record to signal that
+	// store max values in the regular end record to signal
	// that the zip64 values should be used instead
	records = uint16max
	size = uint32max

View file

@@ -41,9 +41,16 @@ var compareTests = []struct {
func TestCompare(t *testing.T) {
	for _, tt := range compareTests {
-		cmp := Compare(tt.a, tt.b)
+		numShifts := 16
+		buffer := make([]byte, len(tt.b)+numShifts)
+		// vary the input alignment of tt.b
+		for offset := 0; offset <= numShifts; offset++ {
+			shiftedB := buffer[offset : len(tt.b)+offset]
+			copy(shiftedB, tt.b)
+			cmp := Compare(tt.a, shiftedB)
			if cmp != tt.i {
-				t.Errorf(`Compare(%q, %q) = %v`, tt.a, tt.b, cmp)
+				t.Errorf(`Compare(%q, %q), offset %d = %v; want %v`, tt.a, tt.b, offset, cmp, tt.i)
+			}
		}
	}
}

View file

@@ -385,9 +385,7 @@ func (w *Walker) parseFile(dir, file string) (*ast.File, error) {
	return f, nil
}
-// The package cache doesn't operate correctly in rare (so far artificial)
-// circumstances (issue 8425). Disable before debugging non-obvious errors
-// from the type-checker.
+// Disable before debugging non-obvious errors from the type-checker.
const usePkgCache = true
var (
@@ -398,7 +396,7 @@ var (
// tagKey returns the tag-based key to use in the pkgCache.
// It is a comma-separated string; the first part is dir, the rest tags.
// The satisfied tags are derived from context but only those that
-// matter (the ones listed in the tags argument) are used.
+// matter (the ones listed in the tags argument plus GOOS and GOARCH) are used.
// The tags list, which came from go/build's Package.AllTags,
// is known to be sorted.
func tagKey(dir string, context *build.Context, tags []string) string {
@@ -414,9 +412,17 @@ func tagKey(dir string, context *build.Context, tags []string) string {
	}
	// TODO: ReleaseTags (need to load default)
	key := dir
+	// explicit on GOOS and GOARCH as global cache will use "all" cached packages for
+	// an indirect imported package. See https://github.com/golang/go/issues/21181
+	// for more detail.
+	tags = append(tags, context.GOOS, context.GOARCH)
+	sort.Strings(tags)
	for _, tag := range tags {
		if ctags[tag] {
			key += "," + tag
+			ctags[tag] = false
		}
	}
	return key

View file

@@ -188,3 +188,18 @@ func BenchmarkAll(b *testing.B) {
		}
	}
}
func TestIssue21181(t *testing.T) {
for _, c := range contexts {
c.Compiler = build.Default.Compiler
}
for _, context := range contexts {
w := NewWalker(context, "testdata/src/issue21181")
pkg, err := w.Import("p")
if err != nil {
t.Fatalf("%s: (%s-%s) %s %v", err, context.GOOS, context.GOARCH,
pkg.Name(), w.imported)
}
w.export(pkg)
}
}

View file

@@ -0,0 +1,5 @@
package dep
type Interface interface {
N([]byte)
}

View file

@@ -0,0 +1 @@
package dep

View file

@@ -0,0 +1,5 @@
package indirect
import "dep"
func F(dep.Interface) {}

View file

@@ -0,0 +1,9 @@
package p
import (
"dep"
)
type algo struct {
indrt func(dep.Interface)
}

View file

@@ -0,0 +1,7 @@
package p
import "indirect"
var in = []algo{
{indirect.F},
}

View file

@@ -0,0 +1,11 @@
// +build !amd64
package p
import (
"indirect"
)
var in = []algo{
{indirect.F},
}

View file

@@ -308,6 +308,28 @@ func (p *Parser) asmPCData(operands [][]lex.Token) {
	p.append(prog, "", true)
}
// asmPCAlign assembles a PCALIGN pseudo-op.
// PCALIGN $16
func (p *Parser) asmPCAlign(operands [][]lex.Token) {
if len(operands) != 1 {
p.errorf("expect one operand for PCALIGN")
return
}
// Operand 0 must be an immediate constant.
key := p.address(operands[0])
if !p.validImmediate("PCALIGN", &key) {
return
}
prog := &obj.Prog{
Ctxt: p.ctxt,
As: obj.APCALIGN,
From: key,
}
p.append(prog, "", true)
}
// asmFuncData assembles a FUNCDATA pseudo-op.
// FUNCDATA $1, funcdata<>+4(SB)
func (p *Parser) asmFuncData(operands [][]lex.Token) {

View file

@@ -227,6 +227,8 @@ func (p *Parser) pseudo(word string, operands [][]lex.Token) bool {
		p.asmGlobl(operands)
	case "PCDATA":
		p.asmPCData(operands)
+	case "PCALIGN":
+		p.asmPCAlign(operands)
	case "TEXT":
		p.asmText(operands)
	default:

View file

@@ -18,7 +18,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
	MOVL -2147483648(AX), AX // 8b8000000080
	ADDL 2147483648(AX), AX // 038000000080
	ADDL -2147483648(AX), AX // 038000000080
-	// Make sure MOV CR/DR continues to work after changing it's movtabs.
+	// Make sure MOV CR/DR continues to work after changing its movtabs.
	MOVL CR0, AX // 0f20c0
	MOVL CR0, DX // 0f20c2
	MOVL CR4, DI // 0f20e7

View file

@@ -302,7 +302,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
	// Check that LEAL is permitted to use overflowing offset.
	LEAL 2400959708(BP)(R10*1), BP // 428dac15dcbc1b8f
	LEAL 3395469782(AX)(R10*1), AX // 428d8410d6c162ca
-	// Make sure MOV CR/DR continues to work after changing it's movtabs.
+	// Make sure MOV CR/DR continues to work after changing its movtabs.
	MOVQ CR0, AX // 0f20c0
	MOVQ CR0, DX // 0f20c2
	MOVQ CR4, DI // 0f20e7

View file

@@ -25,6 +25,18 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
	ADD R1, R2, R3
	ADD R1, ZR, R3
	ADD $1, R2, R3
ADD $0x000aaa, R2, R3 // ADD $2730, R2, R3 // 43a82a91
ADD $0x000aaa, R2 // ADD $2730, R2 // 42a82a91
ADD $0xaaa000, R2, R3 // ADD $11182080, R2, R3 // 43a86a91
ADD $0xaaa000, R2 // ADD $11182080, R2 // 42a86a91
ADD $0xaaaaaa, R2, R3 // ADD $11184810, R2, R3 // 43a82a9163a86a91
ADD $0xaaaaaa, R2 // ADD $11184810, R2 // 42a82a9142a86a91
SUB $0x000aaa, R2, R3 // SUB $2730, R2, R3 // 43a82ad1
SUB $0x000aaa, R2 // SUB $2730, R2 // 42a82ad1
SUB $0xaaa000, R2, R3 // SUB $11182080, R2, R3 // 43a86ad1
SUB $0xaaa000, R2 // SUB $11182080, R2 // 42a86ad1
SUB $0xaaaaaa, R2, R3 // SUB $11184810, R2, R3 // 43a82ad163a86ad1
SUB $0xaaaaaa, R2 // SUB $11184810, R2 // 42a82ad142a86ad1
	ADD R1>>11, R2, R3
	ADD R1<<22, R2, R3
	ADD R1->33, R2, R3
@@ -179,6 +191,11 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
	FMOVD F4, (R2)(R6) // FMOVD F4, (R2)(R6*1) // 446826fc
	FMOVD F4, (R2)(R6<<3) // 447826fc
CMPW $40960, R0 // 1f284071
CMPW $27745, R2 // 3b8c8d525f001b6b
CMNW $0x3fffffc0, R2 // CMNW $1073741760, R2 // fb5f1a325f001b2b
CMPW $0xffff0, R1 // CMPW $1048560, R1 // fb3f1c323f001b6b
ADD $0x3fffffffc000, R5 // ADD $70368744161280, R5 // fb7f72b2a5001b8b
	// LTYPE1 imsr ',' spreg ','
	// {
	//	outcode($1, &$2, $4, &nullgen);
@@ -214,6 +231,16 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
	ANDS $0x22220000, R3, R4 // ANDS $572653568, R3, R4 // 5b44a4d264001bea
	BICS $0x22220000, R3, R4 // BICS $572653568, R3, R4 // 5b44a4d264003bea
EOR $0xe03fffffffffffff, R20, R22 // EOR $-2287828610704211969, R20, R22 // 96e243d2
TSTW $0x600000006, R1 // TSTW $25769803782, R1 // 3f041f72
ANDS $0xffff, R2 // ANDS $65535, R2 // 423c40f2
AND $0x7fffffff, R3 // AND $2147483647, R3 // 63784092
ANDS $0x0ffffffff80000000, R2 // ANDS $-2147483648, R2 // 428061f2
AND $0xfffff, R2 // AND $1048575, R2 // 424c4092
ANDW $0xf00fffff, R1 // ANDW $4027580415, R1 // 215c0412
ANDSW $0xff00ffff, R1 // ANDSW $4278255615, R1 // 215c0872
TSTW $0xff00ff, R1 // TSTW $16711935, R1 // 3f9c0072
	AND $8, R0, RSP // 1f007d92
	ORR $8, R0, RSP // 1f007db2
	EOR $8, R0, RSP // 1f007dd2
@@ -221,6 +248,19 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
	ORN $8, R0, RSP // 1ff87cb2
	EON $8, R0, RSP // 1ff87cd2
MOVD $0x3fffffffc000, R0 // MOVD $70368744161280, R0 // e07f72b2
MOVW $0xaaaa0000, R1 // MOVW $2863267840, R1 // 4155b552
MOVW $0xaaaaffff, R1 // MOVW $2863333375, R1 // a1aaaa12
MOVW $0xaaaa, R1 // MOVW $43690, R1 // 41559552
MOVW $0xffffaaaa, R1 // MOVW $4294945450, R1 // a1aa8a12
MOVW $0xffff0000, R1 // MOVW $4294901760, R1 // e1ffbf52
MOVD $0xffff00000000000, R1 // MOVD $1152903912420802560, R1 // e13f54b2
MOVD $0x11110000, R1 // MOVD $286326784, R1 // 2122a2d2
MOVD $0, R1 // 010080d2
MOVD $-1, R1 // 01008092
MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2
MOVD $0xffffffffffffaaaa, R1 // MOVD $-21846, R1 // a1aa8a92
	//
	// CLS
	//
@@ -416,7 +456,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
	CMP R22.SXTX, RSP // ffe336eb
	CMP $0x22220000, RSP // CMP $572653568, RSP // 5b44a4d2ff633beb
-	CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a4d2ff633b6b
+	CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff633b6b
	// TST
	TST $15, R2 // 5f0c40f2
View file

@@ -98,4 +98,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
	LDAR (R4),$0,R5 // 7ca020a8
	LDAR (R3),R5 // 7ca018a8
+	// float constants
+	FMOVD $(0.0), F1 // f0210cd0
+	FMOVD $(-0.0), F1 // f0210cd0fc200850
	RET

View file

@@ -9,6 +9,7 @@ package main
import (
	"bytes"
+	"cmd/internal/xcoff"
	"debug/dwarf"
	"debug/elf"
	"debug/macho"
@@ -188,6 +189,7 @@ func (p *Package) Translate(f *File) {
			break
		}
	}
+	p.prepareNames(f)
	if p.rewriteCalls(f) {
		// Add `import _cgo_unsafe "unsafe"` after the package statement.
		f.Edit.Insert(f.offset(f.AST.Name.End()), "; import _cgo_unsafe \"unsafe\"")
@@ -679,6 +681,27 @@ func (p *Package) recordTypedefs1(dtype dwarf.Type, visited map[dwarf.Type]bool)
	}
}
// prepareNames finalizes the Kind field of not-type names and sets
// the mangled name of all names.
func (p *Package) prepareNames(f *File) {
for _, n := range f.Name {
if n.Kind == "not-type" {
if n.Define == "" {
n.Kind = "var"
} else {
n.Kind = "macro"
n.FuncType = &FuncType{
Result: n.Type,
Go: &ast.FuncType{
Results: &ast.FieldList{List: []*ast.Field{{Type: n.Type.Go}}},
},
}
}
}
p.mangleName(n)
}
}
// mangleName does name mangling to translate names
// from the original Go source files to the names
// used in the final Go files generated by cgo.
@@ -722,16 +745,19 @@
// argument and then calls the original function.
// This returns whether the package needs to import unsafe as _cgo_unsafe.
func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool {
+	params := name.FuncType.Params
+	args := call.Call.Args
	// Avoid a crash if the number of arguments is
	// less than the number of parameters.
	// This will be caught when the generated file is compiled.
-	if len(call.Call.Args) < len(name.FuncType.Params) {
+	if len(args) < len(params) {
		return false
	}
	any := false
-	for i, param := range name.FuncType.Params {
-		if p.needsPointerCheck(f, param.Go, call.Call.Args[i]) {
+	for i, param := range params {
+		if p.needsPointerCheck(f, param.Go, args[i]) {
			any = true
			break
		}
@@ -750,127 +776,108 @@
	// Using a function literal like this lets us do correct
	// argument type checking, and works correctly if the call is
	// deferred.
var sb bytes.Buffer
sb.WriteString("func(")
needsUnsafe := false needsUnsafe := false
params := make([]*ast.Field, len(name.FuncType.Params))
nargs := make([]ast.Expr, len(name.FuncType.Params))
var stmts []ast.Stmt
for i, param := range name.FuncType.Params {
// params is going to become the parameters of the
// function literal.
// nargs is going to become the list of arguments made
// by the call within the function literal.
// nparam is the parameter of the function literal that
// corresponds to param.
origArg := call.Call.Args[i] for i, param := range params {
nparam := ast.NewIdent(fmt.Sprintf("_cgo%d", i)) if i > 0 {
nargs[i] = nparam sb.WriteString(", ")
}
fmt.Fprintf(&sb, "_cgo%d ", i)
// The Go version of the C type might use unsafe.Pointer,
// but the file might not import unsafe.
// Rewrite the Go type if necessary to use _cgo_unsafe.
ptype := p.rewriteUnsafe(param.Go) ptype := p.rewriteUnsafe(param.Go)
if ptype != param.Go { if ptype != param.Go {
needsUnsafe = true needsUnsafe = true
} }
sb.WriteString(gofmtLine(ptype))
params[i] = &ast.Field{
Names: []*ast.Ident{nparam},
Type: ptype,
} }
if !p.needsPointerCheck(f, param.Go, origArg) { sb.WriteString(")")
result := false
twoResults := false
// Check whether this call expects two results.
for _, ref := range f.Ref {
if ref.Expr != &call.Call.Fun {
continue continue
} }
if ref.Context == ctxCall2 {
// Run the cgo pointer checks on nparam. sb.WriteString(" (")
result = true
// Change the function literal to call the real function twoResults = true
// with the parameter passed through _cgoCheckPointer. }
c := &ast.CallExpr{ break
Fun: ast.NewIdent("_cgoCheckPointer"),
Args: []ast.Expr{
nparam,
},
} }
// Add optional additional arguments for an address // Add the result type, if any.
// expression.
c.Args = p.checkAddrArgs(f, c.Args, origArg)
stmt := &ast.ExprStmt{
X: c,
}
stmts = append(stmts, stmt)
}
const cgoMarker = "__cgo__###__marker__"
fcall := &ast.CallExpr{
Fun: ast.NewIdent(cgoMarker),
Args: nargs,
}
ftype := &ast.FuncType{
Params: &ast.FieldList{
List: params,
},
}
if name.FuncType.Result != nil { if name.FuncType.Result != nil {
rtype := p.rewriteUnsafe(name.FuncType.Result.Go) rtype := p.rewriteUnsafe(name.FuncType.Result.Go)
if rtype != name.FuncType.Result.Go { if rtype != name.FuncType.Result.Go {
needsUnsafe = true needsUnsafe = true
} }
ftype.Results = &ast.FieldList{ if !twoResults {
List: []*ast.Field{ sb.WriteString(" ")
&ast.Field{
Type: rtype,
},
},
} }
sb.WriteString(gofmtLine(rtype))
result = true
} }
// If this call expects two results, we have to // Add the second result type, if any.
// adjust the results of the function we generated. if twoResults {
for _, ref := range f.Ref { if name.FuncType.Result == nil {
if ref.Expr == &call.Call.Fun && ref.Context == ctxCall2 { // An explicit void result looks odd but it
if ftype.Results == nil { // seems to be how cgo has worked historically.
// An explicit void argument sb.WriteString("_Ctype_void")
// looks odd but it seems to
// be how cgo has worked historically.
ftype.Results = &ast.FieldList{
List: []*ast.Field{
&ast.Field{
Type: ast.NewIdent("_Ctype_void"),
},
},
}
}
ftype.Results.List = append(ftype.Results.List,
&ast.Field{
Type: ast.NewIdent("error"),
})
} }
sb.WriteString(", error)")
} }
var fbody ast.Stmt sb.WriteString(" { ")
if ftype.Results == nil {
fbody = &ast.ExprStmt{ for i, param := range params {
X: fcall, arg := args[i]
if !p.needsPointerCheck(f, param.Go, arg) {
continue
} }
} else {
fbody = &ast.ReturnStmt{ // Check for &a[i].
Results: []ast.Expr{fcall}, if p.checkIndex(&sb, f, arg, i) {
continue
} }
// Check for &x.
if p.checkAddr(&sb, arg, i) {
continue
} }
lit := &ast.FuncLit{
Type: ftype, fmt.Fprintf(&sb, "_cgoCheckPointer(_cgo%d); ", i)
Body: &ast.BlockStmt{
List: append(stmts, fbody),
},
} }
text := strings.Replace(gofmt(lit), "\n", ";", -1)
repl := strings.Split(text, cgoMarker) if result {
f.Edit.Insert(f.offset(call.Call.Fun.Pos()), repl[0]) sb.WriteString("return ")
f.Edit.Insert(f.offset(call.Call.Fun.End()), repl[1]) }
// Now we are ready to call the C function.
// To work smoothly with rewriteRef we leave the call in place
// and just insert our new arguments between the function
// and the old arguments.
f.Edit.Insert(f.offset(call.Call.Fun.Pos()), sb.String())
sb.Reset()
sb.WriteString("(")
for i := range params {
if i > 0 {
sb.WriteString(", ")
}
fmt.Fprintf(&sb, "_cgo%d", i)
}
sb.WriteString("); }")
f.Edit.Insert(f.offset(call.Call.Lparen), sb.String())
return needsUnsafe return needsUnsafe
} }
@@ -979,19 +986,13 @@
	}
}
-// checkAddrArgs tries to add arguments to the call of
-// _cgoCheckPointer when the argument is an address expression. We
-// pass true to mean that the argument is an address operation of
-// something other than a slice index, which means that it's only
-// necessary to check the specific element pointed to, not the entire
-// object. This is for &s.f, where f is a field in a struct. We can
-// pass a slice or array, meaning that we should check the entire
-// slice or array but need not check any other part of the object.
-// This is for &s.a[i], where we need to check all of a. However, we
-// only pass the slice or array if we can refer to it without side
-// effects.
-func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr {
+// checkIndex checks whether arg has the form &a[i], possibly inside type
+// conversions. If so, and if a has no side effects, it writes
+// _cgoCheckPointer(_cgoNN, a) to sb and returns true. This tells
+// _cgoCheckPointer to check the complete contents of the slice.
+func (p *Package) checkIndex(sb *bytes.Buffer, f *File, arg ast.Expr, i int) bool {
	// Strip type conversions.
+	x := arg
	for {
		c, ok := x.(*ast.CallExpr)
		if !ok || len(c.Args) != 1 || !p.isType(c.Fun) {
@@ -1001,22 +1002,46 @@
	}
	u, ok := x.(*ast.UnaryExpr)
	if !ok || u.Op != token.AND {
-		return args
+		return false
	}
	index, ok := u.X.(*ast.IndexExpr)
	if !ok {
-		// This is the address of something that is not an
-		// index expression. We only need to examine the
-		// single value to which it points.
-		// TODO: what if true is shadowed?
-		return append(args, ast.NewIdent("true"))
+		return false
	}
-	if !p.hasSideEffects(f, index.X) {
-		// Examine the entire slice.
-		return append(args, index.X)
+	if p.hasSideEffects(f, index.X) {
+		return false
	}
-	// Treat the pointer as unknown.
-	return args
+	fmt.Fprintf(sb, "_cgoCheckPointer(_cgo%d, %s); ", i, gofmtLine(index.X))
+	return true
+}
+
+// checkAddr checks whether arg has the form &x, possibly inside type
+// conversions. If so it writes _cgoCheckPointer(_cgoNN, true) to sb
+// and returns true. This tells _cgoCheckPointer to check just the
+// contents of the pointer being passed, not any other part of the
+// memory allocation. This is run after checkIndex, which looks for
+// the special case of &a[i], which requires different checks.
+func (p *Package) checkAddr(sb *bytes.Buffer, arg ast.Expr, i int) bool {
+	// Strip type conversions.
+	px := &arg
+	for {
+		c, ok := (*px).(*ast.CallExpr)
+		if !ok || len(c.Args) != 1 || !p.isType(c.Fun) {
+			break
+		}
+		px = &c.Args[0]
+	}
+	if u, ok := (*px).(*ast.UnaryExpr); !ok || u.Op != token.AND {
+		return false
+	}
+	// Use "0 == 0" to do the right thing in the unlikely event
+	// that "true" is shadowed.
+	fmt.Fprintf(sb, "_cgoCheckPointer(_cgo%d, 0 == 0); ", i)
+	return true
}
// hasSideEffects returns whether the expression x has any side
@@ -1026,8 +1051,7 @@ func (p *Package) hasSideEffects(f *File, x ast.Expr) bool {
	found := false
	f.walk(x, ctxExpr,
		func(f *File, x interface{}, context astContext) {
-			switch x.(type) {
-			case *ast.CallExpr:
+			if _, ok := x.(*ast.CallExpr); ok {
				found = true
			}
		})
@@ -1131,24 +1155,7 @@
	// code for them.
	functions := make(map[string]bool)
-	// Assign mangled names.
	for _, n := range f.Name {
-		if n.Kind == "not-type" {
-			if n.Define == "" {
-				n.Kind = "var"
-			} else {
-				n.Kind = "macro"
-				n.FuncType = &FuncType{
-					Result: n.Type,
-					Go: &ast.FuncType{
-						Results: &ast.FieldList{List: []*ast.Field{{Type: n.Type.Go}}},
-					},
-				}
-			}
-		}
-		if n.Mangle == "" {
-			p.mangleName(n)
-		}
		if n.Kind == "func" {
			functions[n.Go] = false
		}
@@ -1162,6 +1169,60 @@
		if r.Name.IsConst() && r.Name.Const == "" {
			error_(r.Pos(), "unable to find value of constant C.%s", fixGo(r.Name.Go))
		}
if r.Name.Kind == "func" {
switch r.Context {
case ctxCall, ctxCall2:
functions[r.Name.Go] = true
}
}
expr := p.rewriteName(f, r)
if *godefs {
// Substitute definition for mangled type name.
if id, ok := expr.(*ast.Ident); ok {
if t := typedef[id.Name]; t != nil {
expr = t.Go
}
if id.Name == r.Name.Mangle && r.Name.Const != "" {
expr = ast.NewIdent(r.Name.Const)
}
}
}
// Copy position information from old expr into new expr,
// in case expression being replaced is first on line.
// See golang.org/issue/6563.
pos := (*r.Expr).Pos()
if x, ok := expr.(*ast.Ident); ok {
expr = &ast.Ident{NamePos: pos, Name: x.Name}
}
// Change AST, because some later processing depends on it,
// and also because -godefs mode still prints the AST.
old := *r.Expr
*r.Expr = expr
// Record source-level edit for cgo output.
repl := gofmt(expr)
if r.Name.Kind != "type" {
repl = "(" + repl + ")"
}
f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), repl)
}
// Remove functions only used as expressions, so their respective
// bridge functions are not generated.
for name, used := range functions {
if !used {
delete(f.Name, name)
}
}
}
// rewriteName returns the expression used to rewrite a reference.
func (p *Package) rewriteName(f *File, r *Ref) ast.Expr {
	var expr ast.Expr = ast.NewIdent(r.Name.Mangle) // default
	switch r.Context {
	case ctxCall, ctxCall2:
@@ -1178,7 +1239,6 @@
			error_(r.Pos(), "call of non-function C.%s", fixGo(r.Name.Go))
			break
		}
-		functions[r.Name.Go] = true
		if r.Context == ctxCall2 {
			if r.Name.Go == "_CMalloc" {
				error_(r.Pos(), "no two-result form for C.malloc")
@@ -1259,48 +1319,7 @@
			error_(r.Pos(), "must call C.%s", fixGo(r.Name.Go))
		}
	}
return expr
if *godefs {
// Substitute definition for mangled type name.
if id, ok := expr.(*ast.Ident); ok {
if t := typedef[id.Name]; t != nil {
expr = t.Go
}
if id.Name == r.Name.Mangle && r.Name.Const != "" {
expr = ast.NewIdent(r.Name.Const)
}
}
}
// Copy position information from old expr into new expr,
// in case expression being replaced is first on line.
// See golang.org/issue/6563.
pos := (*r.Expr).Pos()
switch x := expr.(type) {
case *ast.Ident:
expr = &ast.Ident{NamePos: pos, Name: x.Name}
}
// Change AST, because some later processing depends on it,
// and also because -godefs mode still prints the AST.
old := *r.Expr
*r.Expr = expr
// Record source-level edit for cgo output.
repl := gofmt(expr)
if r.Name.Kind != "type" {
repl = "(" + repl + ")"
}
f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), repl)
}
// Remove functions only used as expressions, so their respective
// bridge functions are not generated.
for name, used := range functions {
if !used {
delete(f.Name, name)
}
}
}

// gccBaseCmd returns the start of the compiler command line.
@ -1377,6 +1396,9 @@ func (p *Package) gccCmd() []string {
c = append(c, p.GccOptions...)
c = append(c, p.gccMachine()...)
if goos == "aix" {
c = append(c, "-maix64")
}
c = append(c, "-") //read input from standard input c = append(c, "-") //read input from standard input
return c return c
} }
@ -1663,7 +1685,77 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
return d, ints, floats, strs
}

fatalf("cannot parse gcc output %s as ELF, Mach-O, PE object", gccTmp())
if f, err := xcoff.Open(gccTmp()); err == nil {
defer f.Close()
d, err := f.DWARF()
if err != nil {
fatalf("cannot load DWARF output from %s: %v", gccTmp(), err)
}
bo := binary.BigEndian
for _, s := range f.Symbols {
switch {
case isDebugInts(s.Name):
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
}
}
}
}
case isDebugFloats(s.Name):
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
}
}
}
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
strdata[n] = string(data)
}
}
}
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
}
strlens[n] = int(strlen)
}
}
}
break
}
}
}
buildStrings()
return d, ints, floats, strs
}
fatalf("cannot parse gcc output %s as ELF, Mach-O, PE, XCOFF object", gccTmp())
panic("not reached") panic("not reached")
} }


@ -126,3 +126,9 @@ func gofmt(n interface{}) string {
}
return gofmtBuf.String()
}
// gofmtLine returns the gofmt-formatted string for an AST node,
// ensuring that it is on a single line.
func gofmtLine(n interface{}) string {
return strings.Replace(gofmt(n), "\n", ";", -1)
}
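A worked sketch of the difference, with n a hypothetical *ast.IfStmt (comments only, not tool output):

	// gofmt(n)     == "if x {\n\treturn\n}"
	// gofmtLine(n) == "if x {;\treturn;}"
	//
	// i.e. the same gofmt text with every newline replaced by ";",
	// so the node fits on a single line of generated output.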


@ -6,6 +6,7 @@ package main
import (
"bytes"
"cmd/internal/xcoff"
"debug/elf" "debug/elf"
"debug/macho" "debug/macho"
"debug/pe" "debug/pe"
@ -312,7 +313,25 @@ func dynimport(obj string) {
return
}

fatalf("cannot parse %s as ELF, Mach-O or PE", obj)
if f, err := xcoff.Open(obj); err == nil {
sym, err := f.ImportedSymbols()
if err != nil {
fatalf("cannot load imported symbols from XCOFF file %s: %v", obj, err)
}
for _, s := range sym {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, s.Name, s.Library)
}
lib, err := f.ImportedLibraries()
if err != nil {
fatalf("cannot load imported libraries from XCOFF file %s: %v", obj, err)
}
for _, l := range lib {
fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
fatalf("cannot parse %s as ELF, Mach-O, PE or XCOFF", obj)
}
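For context, a hedged sketch of the directives the XCOFF branch above emits; the format strings come straight from the code, but the symbol and library names here are hypothetical (AIX shared libc is typically an archive member such as libc.a/shr_64.o):

	//go:cgo_import_dynamic printf printf "libc.a/shr_64.o"
	//go:cgo_import_dynamic _ _ "libc.a/shr_64.o"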
// Construct a gcc struct matching the gc argument frame.


@ -92,8 +92,6 @@ Flags:
Compile with race detector enabled.
-trimpath prefix
Remove prefix from recorded source file paths.
-u
Disallow importing packages not marked as safe; implies -nolocalimports.
There are also a number of debugging flags; run the command with no arguments
for a usage message.
@ -125,7 +123,7 @@ directive can skip over a directive like any other comment.
// For a //line comment, this is the first character of the next line, and
// for a /*line comment this is the character position immediately following the closing */.
// If no filename is given, the recorded filename is empty if there is also no column number;
// otherwise is is the most recently recorded filename (actual filename or filename specified
// otherwise it is the most recently recorded filename (actual filename or filename specified
// by previous line directive).
// If a line directive doesn't specify a column number, the column is "unknown" until
// the next directive and the compiler does not report column numbers for that range.
@ -146,7 +144,7 @@ directive can skip over a directive like any other comment.
// will report positions in the original input to the generator.

/*
The line directive is an historical special case; all other directives are of the form
//go:name and must start at the begnning of a line, indicating that the directive is defined
//go:name and must start at the beginning of a line, indicating that the directive is defined
by the Go toolchain.

//go:noescape


@ -583,7 +583,6 @@ var knownFormats = map[string]string{
"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "", "*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
"*cmd/compile/internal/types.Field %p": "", "*cmd/compile/internal/types.Field %p": "",
"*cmd/compile/internal/types.Field %v": "", "*cmd/compile/internal/types.Field %v": "",
"*cmd/compile/internal/types.Sym %+v": "",
"*cmd/compile/internal/types.Sym %0S": "", "*cmd/compile/internal/types.Sym %0S": "",
"*cmd/compile/internal/types.Sym %S": "", "*cmd/compile/internal/types.Sym %S": "",
"*cmd/compile/internal/types.Sym %p": "", "*cmd/compile/internal/types.Sym %p": "",


@ -229,9 +229,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Result[0] (the quotient) is in AX.
// Result[1] (the remainder) is in DX.
r := v.Args[1].Reg()
var j1 *obj.Prog
// CPU faults upon signed overflow, which occurs when the most
// negative int is divided by -1. Handle divide by -1 as a special case.
if ssa.NeedsFixUp(v) {
var c *obj.Prog
switch v.Op {
case ssa.OpAMD64DIVQ:
@ -245,8 +247,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
c.From.Reg = r
c.To.Type = obj.TYPE_CONST
c.To.Offset = -1
j1 := s.Prog(x86.AJEQ)
j1 = s.Prog(x86.AJEQ)
j1.To.Type = obj.TYPE_BRANCH
}
// Sign extend dividend.
switch v.Op {
@ -263,6 +266,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = r
if j1 != nil {
// Skip over -1 fixup code.
j2 := s.Prog(obj.AJMP)
j2.To.Type = obj.TYPE_BRANCH
@ -293,6 +297,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
j1.To.Val = n1
j2.To.Val = s.Pc()
}
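For reference, the wrap-around behavior this fixup preserves is observable in ordinary Go: per the spec, dividing the most negative integer by -1 overflows back to itself instead of faulting. A small standalone check:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		x, y := int64(math.MinInt64), int64(-1)
		fmt.Println(x / y) // -9223372036854775808: quotient wraps to x
		fmt.Println(x % y) // 0
	}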
case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU:
// the frontend rewrites constant division by 8/16/32 bit integers into
@ -315,6 +320,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
m.To.Reg = x86.REG_DX
}
case ssa.OpAMD64MULQU, ssa.OpAMD64MULLU:
// Arg[0] is already in AX as it's the only register we allow
// results lo in AX
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
case ssa.OpAMD64MULQU2:
// Arg[0] is already in AX as it's the only register we allow
// results hi in DX, lo in AX
@ -653,43 +665,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&p.From, v) gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg() p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.From.Scale = 8
p.From.Index = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.From.Scale = 4
p.From.Index = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVWloadidx2:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.From.Scale = 2
p.From.Index = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
switch v.Op {
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1:
if i == x86.REG_SP {
r, i = i, r
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Scale = 1
case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8:
p.From.Scale = 8
case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
p.From.Scale = 4
case ssa.OpAMD64MOVWloadidx2:
p.From.Scale = 2
}
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
@ -704,45 +699,28 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2:
case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
p.To.Scale = 8
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
p.To.Scale = 4
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVWstoreidx2:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
p.To.Scale = 2
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
switch v.Op {
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1:
if i == x86.REG_SP {
r, i = i, r
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = r
p.To.Scale = 1
case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8:
p.To.Scale = 8
case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
p.To.Scale = 4
case ssa.OpAMD64MOVWstoreidx2:
p.To.Scale = 2
}
p.To.Reg = r
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
@ -816,14 +794,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Break false dependency on destination register.
opregreg(s, x86.AXORPS, r, r)
opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i, ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
var p *obj.Prog
switch v.Op {
case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i:
p = s.Prog(x86.AMOVQ)
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
p := s.Prog(x86.AMOVL)
p = s.Prog(x86.AMOVL)
}
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
@ -968,24 +946,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
switch v.Op {
case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD:
case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64SQRTSD:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
}
case ssa.OpAMD64ROUNDSD:
p := s.Prog(v.Op.Asm())
val := v.AuxInt
@ -1020,7 +991,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF,
ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
ssa.OpAMD64SETA, ssa.OpAMD64SETAE:
ssa.OpAMD64SETA, ssa.OpAMD64SETAE,
ssa.OpAMD64SETO:
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -1163,6 +1135,8 @@ var blockJump = [...]struct {
ssa.BlockAMD64GE: {x86.AJGE, x86.AJLT},
ssa.BlockAMD64LE: {x86.AJLE, x86.AJGT},
ssa.BlockAMD64GT: {x86.AJGT, x86.AJLE},
ssa.BlockAMD64OS: {x86.AJOS, x86.AJOC},
ssa.BlockAMD64OC: {x86.AJOC, x86.AJOS},
ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC},
ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS},
ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS},
@ -1224,6 +1198,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
case ssa.BlockAMD64EQ, ssa.BlockAMD64NE,
ssa.BlockAMD64LT, ssa.BlockAMD64GE,
ssa.BlockAMD64LE, ssa.BlockAMD64GT,
ssa.BlockAMD64OS, ssa.BlockAMD64OC,
ssa.BlockAMD64ULT, ssa.BlockAMD64UGT,
ssa.BlockAMD64ULE, ssa.BlockAMD64UGE:
jmp := blockJump[b.Kind]


@ -102,7 +102,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {
case TINT8, TUINT8, TINT16, TUINT16,
TINT32, TUINT32, TINT64, TUINT64,
TINT, TUINT, TUINTPTR,
TBOOL, TPTR32, TPTR64,
TBOOL, TPTR,
TCHAN, TUNSAFEPTR:
return AMEM, nil
@ -300,18 +300,8 @@ func genhash(sym *types.Sym, t *types.Type) {
testdclstack()
}
// Disable safemode while compiling this code: the code we
// generate internally can refer to unsafe.Pointer.
// In this case it can happen if we need to generate an ==
// for a struct containing a reflect.Value, which itself has
// an unexported field of type unsafe.Pointer.
old_safemode := safemode
safemode = false
fn.Func.SetNilCheckDisabled(true)
funccompile(fn)
safemode = old_safemode
}

func hashfor(t *types.Type) *Node {
@ -484,22 +474,12 @@ func geneq(sym *types.Sym, t *types.Type) {
testdclstack()
}
// Disable safemode while compiling this code: the code we
// generate internally can refer to unsafe.Pointer.
// In this case it can happen if we need to generate an ==
// for a struct containing a reflect.Value, which itself has
// an unexported field of type unsafe.Pointer.
old_safemode := safemode
safemode = false
// Disable checknils while compiling this code.
// We are comparing a struct or an array,
// neither of which can be nil, and our comparisons
// are shallow.
fn.Func.SetNilCheckDisabled(true)
funccompile(fn)
safemode = old_safemode
}

// eqfield returns the node


@ -250,12 +250,8 @@ func dowidth(t *types.Type) {
w = 16
t.Align = uint8(Widthreg)
case TPTR:
w = int64(Widthptr)
case TPTR32:
w = 4
checkwidth(t.Elem())
case TPTR64:
w = 8
checkwidth(t.Elem())

case TUNSAFEPTR:


@ -43,7 +43,7 @@ func (p *exporter) markType(t *types.Type) {
// the user already needs some way to construct values of
// those types.
switch t.Etype {
case TPTR32, TPTR64, TARRAY, TSLICE, TCHAN:
case TPTR, TARRAY, TSLICE, TCHAN:
// TODO(mdempsky): Skip marking element type for
// send-only channels?
p.markType(t.Elem())


@ -55,15 +55,15 @@ var runtimeDecls = [...]struct {
{"convT2E16", funcTag, 52}, {"convT2E16", funcTag, 52},
{"convT2E32", funcTag, 52}, {"convT2E32", funcTag, 52},
{"convT2E64", funcTag, 52}, {"convT2E64", funcTag, 52},
{"convT2Estring", funcTag, 53}, {"convT2Estring", funcTag, 52},
{"convT2Eslice", funcTag, 53}, {"convT2Eslice", funcTag, 52},
{"convT2Enoptr", funcTag, 53}, {"convT2Enoptr", funcTag, 53},
{"convT2I", funcTag, 53}, {"convT2I", funcTag, 53},
{"convT2I16", funcTag, 52}, {"convT2I16", funcTag, 52},
{"convT2I32", funcTag, 52}, {"convT2I32", funcTag, 52},
{"convT2I64", funcTag, 52}, {"convT2I64", funcTag, 52},
{"convT2Istring", funcTag, 53}, {"convT2Istring", funcTag, 52},
{"convT2Islice", funcTag, 53}, {"convT2Islice", funcTag, 52},
{"convT2Inoptr", funcTag, 53}, {"convT2Inoptr", funcTag, 53},
{"assertE2I", funcTag, 52}, {"assertE2I", funcTag, 52},
{"assertE2I2", funcTag, 54}, {"assertE2I2", funcTag, 54},


@ -68,16 +68,16 @@ func convT2E(typ *byte, elem *any) (ret any)
func convT2E16(typ *byte, val any) (ret any)
func convT2E32(typ *byte, val any) (ret any)
func convT2E64(typ *byte, val any) (ret any)
func convT2Estring(typ *byte, elem *any) (ret any)
func convT2Estring(typ *byte, val any) (ret any) // val must be a string
func convT2Eslice(typ *byte, elem *any) (ret any)
func convT2Eslice(typ *byte, val any) (ret any) // val must be a slice
func convT2Enoptr(typ *byte, elem *any) (ret any)
func convT2I(tab *byte, elem *any) (ret any)
func convT2I16(tab *byte, val any) (ret any)
func convT2I32(tab *byte, val any) (ret any)
func convT2I64(tab *byte, val any) (ret any)
func convT2Istring(tab *byte, elem *any) (ret any)
func convT2Istring(tab *byte, val any) (ret any) // val must be a string
func convT2Islice(tab *byte, elem *any) (ret any)
func convT2Islice(tab *byte, val any) (ret any) // val must be a slice
func convT2Inoptr(tab *byte, elem *any) (ret any)
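A hedged illustration of the source-level conversions these specializations target; whether the compiler actually selects the specialized runtime call depends on the types involved and on escape analysis:

	func toInterfaces() {
		s := "hello"
		var e interface{} = s // candidate for convT2Estring
		b := []byte("world")
		var e2 interface{} = b // candidate for convT2Eslice
		_, _ = e, e2
	}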
// interface type assertions x.(T)


@ -337,18 +337,10 @@ func closuredebugruntimecheck(clo *Node) {
}
}
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
func closureType(clo *Node) *types.Type {
func walkclosure(clo *Node, init *Nodes) *Node {
xfunc := clo.Func.Closure

// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
if Debug_closure > 0 {
Warnl(clo.Pos, "closure converted to global")
}
return xfunc.Func.Nname
}
closuredebugruntimecheck(clo)
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
@ -362,11 +354,10 @@ func walkclosure(clo *Node, init *Nodes) *Node {
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
fields := []*Node{
namedfield(".F", types.Types[TUINTPTR]),
}
for _, v := range xfunc.Func.Cvars.Slice() {
for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
typ := v.Type
if !v.Name.Byval() {
typ = types.NewPtr(typ)
@ -375,6 +366,22 @@ func walkclosure(clo *Node, init *Nodes) *Node {
}
typ := tostruct(fields)
typ.SetNoalg(true)
return typ
}
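Concretely, for the example in the comment above (a closure capturing an int i and a string s), the struct built here is roughly equivalent to the following; this is illustrative only, since the real first field is named ".F", which is not expressible in Go source, and by-value captures would not be pointer-typed:

	type closureHeader struct {
		F uintptr // code pointer for the closure body
		i *int    // captured i (a pointer because it is captured by reference)
		s *string // captured s
	}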
func walkclosure(clo *Node, init *Nodes) *Node {
xfunc := clo.Func.Closure
// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
if Debug_closure > 0 {
Warnl(clo.Pos, "closure converted to global")
}
return xfunc.Func.Nname
}
closuredebugruntimecheck(clo)
typ := closureType(clo)
clos := nod(OCOMPLIT, nil, nod(OIND, typenod(typ), nil))
clos.Esc = clo.Esc
@ -389,10 +396,10 @@ func walkclosure(clo *Node, init *Nodes) *Node {
clos.Left.Esc = clo.Esc

// non-escaping temp to use, if any.
// orderexpr did not compute the type; fill it in now.
if x := prealloc[clo]; x != nil {
x.Type = clos.Left.Left.Type
x.Orig.Type = x.Type
if !types.Identical(typ, x.Type) {
panic("closure type does not match order's assigned type")
}
clos.Left.Right = x
delete(prealloc, clo)
}
@ -479,6 +486,18 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
return xfunc
}
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be a OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
func partialCallType(n *Node) *types.Type {
t := tostruct([]*Node{
namedfield("F", types.Types[TUINTPTR]),
namedfield("R", n.Left.Type),
})
t.SetNoalg(true)
return t
}
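In other words, for a method value x.M with receiver type T, the code above builds exactly this shape (T is a placeholder type here so the sketch compiles on its own):

	type T struct{} // placeholder receiver type

	type methodValueHeader struct {
		F uintptr // code pointer of the method wrapper
		R T       // the receiver value x
	}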
func walkpartialcall(n *Node, init *Nodes) *Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
@ -495,30 +514,25 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
checknil(n.Left, init)
}

typ := partialCallType(n)
typ := tostruct([]*Node{
namedfield("F", types.Types[TUINTPTR]),
namedfield("R", n.Left.Type),
})
typ.SetNoalg(true)
clos := nod(OCOMPLIT, nil, nod(OIND, typenod(typ), nil))
clos.Esc = n.Esc
clos.Right.SetImplicit(true)
clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
clos.List.Set1(nod(OCFUNC, n.Func.Nname, nil))
clos.List.Append(n.Left)
// Force type conversion from *struct to the func type.
clos = convnop(clos, n.Type)

// typecheck will insert a PTRLIT node under CONVNOP,
// tag it with escape analysis result.
// The typecheck inside convnop will insert a PTRLIT node under CONVNOP.
// Tag it with escape analysis result.
clos.Left.Esc = n.Esc

// non-escaping temp to use, if any.
// orderexpr did not compute the type; fill it in now.
if x := prealloc[n]; x != nil {
x.Type = clos.Left.Left.Type
x.Orig.Type = x.Type
if !types.Identical(typ, x.Type) {
panic("partial call type does not match order's assigned type")
}
clos.Left.Right = x
delete(prealloc, n)
}


@ -40,7 +40,7 @@ func (v Val) Ctype() Ctype {
switch x := v.U.(type) {
default:
Fatalf("unexpected Ctype for %T", v.U)
panic("not reached")
panic("unreachable")
case nil:
return 0
case *NilVal:
@ -68,7 +68,7 @@ func eqval(a, b Val) bool {
switch x := a.U.(type) {
default:
Fatalf("unexpected Ctype for %T", a.U)
panic("not reached")
panic("unreachable")
case *NilVal:
return true
case bool:
@ -96,7 +96,7 @@ func (v Val) Interface() interface{} {
switch x := v.U.(type) {
default:
Fatalf("unexpected Interface for %T", v.U)
panic("not reached")
panic("unreachable")
case *NilVal:
return nil
case bool, string:
@ -311,7 +311,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node {
}

// avoid repeated calculations, errors
if eqtype(n.Type, t) {
if types.Identical(n.Type, t) {
return n
}
@ -347,7 +347,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node {
case TARRAY:
goto bad

case TPTR32, TPTR64, TUNSAFEPTR:
case TPTR, TUNSAFEPTR:
n.SetVal(Val{new(Mpint)})

case TCHAN, TFUNC, TINTER, TMAP, TSLICE:
@ -424,29 +424,6 @@ bad:
return n
}
func copyval(v Val) Val {
switch u := v.U.(type) {
case *Mpint:
i := new(Mpint)
i.Set(u)
i.Rune = u.Rune
v.U = i
case *Mpflt:
f := newMpflt()
f.Set(u)
v.U = f
case *Mpcplx:
c := new(Mpcplx)
c.Real.Set(&u.Real)
c.Imag.Set(&u.Imag)
v.U = c
}
return v
}
func tocplx(v Val) Val {
switch u := v.U.(type) {
case *Mpint:
@ -585,10 +562,6 @@ func tostr(v Val) Val {
i = u.Int64()
}
v.U = string(i)
case *NilVal:
// Can happen because of string([]byte(nil)).
v.U = ""
}

return v
@ -609,50 +582,55 @@ func Isconst(n *Node, ct Ctype) bool {
return t == ct || (ct == CTINT && t == CTRUNE)
}
// if n is constant, rewrite as OLITERAL node.
// evconst rewrites constant expressions into OLITERAL nodes.
func evconst(n *Node) {
// pick off just the opcodes that can be
// constant evaluated.
switch n.Op {
default:
return

case OADD,
OAND,
OANDAND,
OANDNOT,
OARRAYBYTESTR,
OCOM,
ODIV,
OEQ,
OGE,
OGT,
OLE,
OLSH,
OLT,
OMINUS,
OMOD,
OMUL,
ONE,
ONOT,
OOR,
OOROR,
OPLUS,
ORSH,
OSUB,
OXOR:
break

nl, nr := n.Left, n.Right

// Pick off just the opcodes that can be constant evaluated.
switch op := n.Op; op {
case OPLUS, OMINUS, OCOM, ONOT:
if nl.Op == OLITERAL {
setconst(n, unaryOp(op, nl.Val(), n.Type))
}

case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND:
if nl.Op == OLITERAL && nr.Op == OLITERAL {
setconst(n, binaryOp(nl.Val(), op, nr.Val()))
}

case OEQ, ONE, OLT, OLE, OGT, OGE:
if nl.Op == OLITERAL && nr.Op == OLITERAL {
if nl.Type.IsInterface() != nr.Type.IsInterface() {
// Mixed interface/non-interface
// constant comparison means comparing
// nil interface with some typed
// constant, which is always unequal.
// E.g., interface{}(nil) == (*int)(nil).
setboolconst(n, op == ONE)
} else {
setboolconst(n, compareOp(nl.Val(), op, nr.Val()))
}
}

case OLSH, ORSH:
if nl.Op == OLITERAL && nr.Op == OLITERAL {
setconst(n, shiftOp(nl.Val(), op, nr.Val()))
}
case OCONV:
if n.Type == nil {
return
}
if !okforconst[n.Type.Etype] && n.Type.Etype != TNIL {
return
}
if n.Type != nil && okforconst[n.Type.Etype] && nl.Op == OLITERAL {
// TODO(mdempsky): There should be a convval function.
setconst(n, convlit1(nl, n.Type, true, false).Val())
}

case OARRAYBYTESTR:
// string([]byte(nil)) or string([]rune(nil))
if nl.Op == OLITERAL && nl.Val().Ctype() == CTNIL {
setconst(n, Val{U: ""})
}

// merge adjacent constants in the argument list.
case OADDSTR:
// Merge adjacent constants in the argument list.
s := n.List.Slice()
for i1 := 0; i1 < len(s); i1++ {
if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) {
@ -678,521 +656,292 @@ func evconst(n *Node) {
} else {
n.List.Set(s)
}
return
}

nl := n.Left
if nl == nil || nl.Type == nil {
return
}
if consttype(nl) == 0 {
return
}
wl := nl.Type.Etype
if isInt[wl] || isFloat[wl] || isComplex[wl] {
wl = TIDEAL
}

// avoid constant conversions in switches below
const (
CTINT_ = uint32(CTINT)
CTRUNE_ = uint32(CTRUNE)
CTFLT_ = uint32(CTFLT)
CTCPLX_ = uint32(CTCPLX)
CTSTR_ = uint32(CTSTR)
CTBOOL_ = uint32(CTBOOL)
CTNIL_ = uint32(CTNIL)
OCONV_ = uint32(OCONV) << 16
OARRAYBYTESTR_ = uint32(OARRAYBYTESTR) << 16
OPLUS_ = uint32(OPLUS) << 16
OMINUS_ = uint32(OMINUS) << 16
OCOM_ = uint32(OCOM) << 16
ONOT_ = uint32(ONOT) << 16
OLSH_ = uint32(OLSH) << 16
ORSH_ = uint32(ORSH) << 16
OADD_ = uint32(OADD) << 16
OSUB_ = uint32(OSUB) << 16
OMUL_ = uint32(OMUL) << 16
ODIV_ = uint32(ODIV) << 16
OMOD_ = uint32(OMOD) << 16
OOR_ = uint32(OOR) << 16
OAND_ = uint32(OAND) << 16
OANDNOT_ = uint32(OANDNOT) << 16
OXOR_ = uint32(OXOR) << 16
OEQ_ = uint32(OEQ) << 16
ONE_ = uint32(ONE) << 16
OLT_ = uint32(OLT) << 16
OLE_ = uint32(OLE) << 16
OGE_ = uint32(OGE) << 16
OGT_ = uint32(OGT) << 16
OOROR_ = uint32(OOROR) << 16
OANDAND_ = uint32(OANDAND) << 16
)

nr := n.Right
var rv Val
var wr types.EType
var ctype uint32
var v Val
if nr == nil {
// copy numeric value to avoid modifying
// nl, in case someone still refers to it (e.g. iota).
v = copyval(nl.Val())

// rune values are int values for the purpose of constant folding.
ctype = uint32(v.Ctype())
if ctype == CTRUNE_ {
ctype = CTINT_
}

switch uint32(n.Op)<<16 | ctype {
default:
if !n.Diag() {
yyerror("illegal constant expression %v %v", n.Op, nl.Type)
n.SetDiag(true)
}
return

case OCONV_ | CTNIL_,
OARRAYBYTESTR_ | CTNIL_:
if n.Type.IsString() {
v = tostr(v)
nl.Type = n.Type
break
}
fallthrough
case OCONV_ | CTINT_,
OCONV_ | CTFLT_,
OCONV_ | CTCPLX_,
OCONV_ | CTSTR_,
OCONV_ | CTBOOL_:
nl = convlit1(nl, n.Type, true, false)
v = nl.Val()

case OPLUS_ | CTINT_:
break

case OMINUS_ | CTINT_:
v.U.(*Mpint).Neg()

case OCOM_ | CTINT_:
et := Txxx
if nl.Type != nil {
et = nl.Type.Etype
}

// calculate the mask in b
// result will be (a ^ mask)
var b Mpint
switch et {
// signed guys change sign
default:
b.SetInt64(-1)

// unsigned guys invert their bits
case TUINT8,
TUINT16,
TUINT32,
TUINT64,
TUINT,
TUINTPTR:
b.Set(maxintval[et])
}

v.U.(*Mpint).Xor(&b)

case OPLUS_ | CTFLT_:
break

case OMINUS_ | CTFLT_:
v.U.(*Mpflt).Neg()

case OPLUS_ | CTCPLX_:
break

case OMINUS_ | CTCPLX_:
v.U.(*Mpcplx).Real.Neg()
v.U.(*Mpcplx).Imag.Neg()

case ONOT_ | CTBOOL_:
if !v.U.(bool) {
goto settrue
}
goto setfalse
}
goto ret
}

if nr.Type == nil {
return
}
if consttype(nr) == 0 {
return
}
wr = nr.Type.Etype
if isInt[wr] || isFloat[wr] || isComplex[wr] {
wr = TIDEAL
}

// check for compatible general types (numeric, string, etc)
if wl != wr {
if wl == TINTER || wr == TINTER {
if n.Op == ONE {
goto settrue
}
goto setfalse
}
goto illegal
}

// check for compatible types.
switch n.Op {
// ideal const mixes with anything but otherwise must match.
default:
if nl.Type.Etype != TIDEAL {
nr = defaultlit(nr, nl.Type)
n.Right = nr
}
if nr.Type.Etype != TIDEAL {
nl = defaultlit(nl, nr.Type)
n.Left = nl
}
if nl.Type.Etype != nr.Type.Etype {
goto illegal
}

// right must be unsigned.
// left can be ideal.
case OLSH, ORSH:
nr = defaultlit(nr, types.Types[TUINT])
n.Right = nr
if nr.Type != nil && (nr.Type.IsSigned() || !nr.Type.IsInteger()) {
goto illegal
}
if nl.Val().Ctype() != CTRUNE {
nl.SetVal(toint(nl.Val()))
}
nr.SetVal(toint(nr.Val()))
}

// copy numeric value to avoid modifying
// n->left, in case someone still refers to it (e.g. iota).
v = copyval(nl.Val())
rv = nr.Val()

// convert to common ideal
if v.Ctype() == CTCPLX || rv.Ctype() == CTCPLX {
v = tocplx(v)
rv = tocplx(rv)
}
if v.Ctype() == CTFLT || rv.Ctype() == CTFLT {
v = toflt(v)
rv = toflt(rv)
}

// Rune and int turns into rune.
if v.Ctype() == CTRUNE && rv.Ctype() == CTINT {
i := new(Mpint)
i.Set(rv.U.(*Mpint))
i.Rune = true
rv.U = i
}
if v.Ctype() == CTINT && rv.Ctype() == CTRUNE {
if n.Op == OLSH || n.Op == ORSH {
i := new(Mpint)
i.Set(rv.U.(*Mpint))
rv.U = i
} else {
i := new(Mpint)
i.Set(v.U.(*Mpint))
i.Rune = true
v.U = i
}
}

if v.Ctype() != rv.Ctype() {
// Use of undefined name as constant?
if (v.Ctype() == 0 || rv.Ctype() == 0) && nerrors > 0 {
return
}
Fatalf("constant type mismatch %v(%d) %v(%d)", nl.Type, v.Ctype(), nr.Type, rv.Ctype())
}

// rune values are int values for the purpose of constant folding.
ctype = uint32(v.Ctype())
if ctype == CTRUNE_ {
ctype = CTINT_
}

// run op
switch uint32(n.Op)<<16 | ctype {
default:
goto illegal

case OADD_ | CTINT_:
v.U.(*Mpint).Add(rv.U.(*Mpint))

case OSUB_ | CTINT_:
v.U.(*Mpint).Sub(rv.U.(*Mpint))

case OMUL_ | CTINT_:
v.U.(*Mpint).Mul(rv.U.(*Mpint))

case ODIV_ | CTINT_:
if rv.U.(*Mpint).CmpInt64(0) == 0 {
yyerror("division by zero")
v.U.(*Mpint).SetOverflow()
break
}
v.U.(*Mpint).Quo(rv.U.(*Mpint))

case OMOD_ | CTINT_:
if rv.U.(*Mpint).CmpInt64(0) == 0 {
yyerror("division by zero")
v.U.(*Mpint).SetOverflow()
break
}
v.U.(*Mpint).Rem(rv.U.(*Mpint))

case OLSH_ | CTINT_:
v.U.(*Mpint).Lsh(rv.U.(*Mpint))

case ORSH_ | CTINT_:
v.U.(*Mpint).Rsh(rv.U.(*Mpint))

case OOR_ | CTINT_:
v.U.(*Mpint).Or(rv.U.(*Mpint))

case OAND_ | CTINT_:
v.U.(*Mpint).And(rv.U.(*Mpint))

case OANDNOT_ | CTINT_:
v.U.(*Mpint).AndNot(rv.U.(*Mpint))

case OXOR_ | CTINT_:
v.U.(*Mpint).Xor(rv.U.(*Mpint))

case OADD_ | CTFLT_:
v.U.(*Mpflt).Add(rv.U.(*Mpflt))

case OSUB_ | CTFLT_:
v.U.(*Mpflt).Sub(rv.U.(*Mpflt))

case OMUL_ | CTFLT_:
v.U.(*Mpflt).Mul(rv.U.(*Mpflt))

case ODIV_ | CTFLT_:
if rv.U.(*Mpflt).CmpFloat64(0) == 0 {
yyerror("division by zero")
v.U.(*Mpflt).SetFloat64(1.0)
break
}
v.U.(*Mpflt).Quo(rv.U.(*Mpflt))

// The default case above would print 'ideal % ideal',
// which is not quite an ideal error.
case OMOD_ | CTFLT_:
if !n.Diag() {
yyerror("illegal constant expression: floating-point %% operation")
n.SetDiag(true)
}
return

case OADD_ | CTCPLX_:
v.U.(*Mpcplx).Real.Add(&rv.U.(*Mpcplx).Real)
v.U.(*Mpcplx).Imag.Add(&rv.U.(*Mpcplx).Imag)

case OSUB_ | CTCPLX_:
v.U.(*Mpcplx).Real.Sub(&rv.U.(*Mpcplx).Real)
v.U.(*Mpcplx).Imag.Sub(&rv.U.(*Mpcplx).Imag)

case OMUL_ | CTCPLX_:
v.U.(*Mpcplx).Mul(rv.U.(*Mpcplx))

case ODIV_ | CTCPLX_:
if !v.U.(*Mpcplx).Div(rv.U.(*Mpcplx)) {
yyerror("complex division by zero")
rv.U.(*Mpcplx).Real.SetFloat64(1.0)
rv.U.(*Mpcplx).Imag.SetFloat64(0.0)
break
}

case OEQ_ | CTNIL_:
goto settrue

case ONE_ | CTNIL_:
goto setfalse

case OEQ_ | CTINT_:
if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) == 0 {
goto settrue
}
goto setfalse

case ONE_ | CTINT_:
if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) != 0 {
goto settrue
}
goto setfalse

case OLT_ | CTINT_:
if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) < 0 {
goto settrue
}
goto setfalse

case OLE_ | CTINT_:
if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) <= 0 {
goto settrue
}
goto setfalse

case OGE_ | CTINT_:
if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) >= 0 {
goto settrue
}
goto setfalse

case OGT_ | CTINT_:
if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) > 0 {
goto settrue
}
goto setfalse

case OEQ_ | CTFLT_:
if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) == 0 {
goto settrue
}
goto setfalse

case ONE_ | CTFLT_:
if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) != 0 {
goto settrue
}
goto setfalse

case OLT_ | CTFLT_:
if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) < 0 {
goto settrue
}
goto setfalse

case OLE_ | CTFLT_:
if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) <= 0 {
goto settrue
}
goto setfalse

case OGE_ | CTFLT_:
if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) >= 0 {
goto settrue
}
goto setfalse

case OGT_ | CTFLT_:
if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) > 0 {
goto settrue
}
goto setfalse

case OEQ_ | CTCPLX_:
if v.U.(*Mpcplx).Real.Cmp(&rv.U.(*Mpcplx).Real) == 0 && v.U.(*Mpcplx).Imag.Cmp(&rv.U.(*Mpcplx).Imag) == 0 {
goto settrue
}
goto setfalse

case ONE_ | CTCPLX_:
if v.U.(*Mpcplx).Real.Cmp(&rv.U.(*Mpcplx).Real) != 0 || v.U.(*Mpcplx).Imag.Cmp(&rv.U.(*Mpcplx).Imag) != 0 {
goto settrue
}
goto setfalse

case OEQ_ | CTSTR_:
if strlit(nl) == strlit(nr) {
goto settrue
}
goto setfalse

case ONE_ | CTSTR_:
if strlit(nl) != strlit(nr) {
goto settrue
}
goto setfalse

case OLT_ | CTSTR_:
if strlit(nl) < strlit(nr) {
goto settrue
}
goto setfalse

case OLE_ | CTSTR_:
if strlit(nl) <= strlit(nr) {
goto settrue
}
goto setfalse

case OGE_ | CTSTR_:
if strlit(nl) >= strlit(nr) {
goto settrue
}
goto setfalse

case OGT_ | CTSTR_:
if strlit(nl) > strlit(nr) {
goto settrue
}
goto setfalse

case OOROR_ | CTBOOL_:
if v.U.(bool) || rv.U.(bool) {
goto settrue
}
goto setfalse

case OANDAND_ | CTBOOL_:
if v.U.(bool) && rv.U.(bool) {
goto settrue
}
goto setfalse

case OEQ_ | CTBOOL_:
if v.U.(bool) == rv.U.(bool) {
goto settrue
}
goto setfalse

case ONE_ | CTBOOL_:
if v.U.(bool) != rv.U.(bool) {
goto settrue
}
goto setfalse
}

ret:
setconst(n, v)
return

settrue:
setconst(n, Val{true})
return

setfalse:
setconst(n, Val{false})
return

illegal:
if !n.Diag() {
yyerror("illegal constant expression: %v %v %v", nl.Type, n.Op, nr.Type)
n.SetDiag(true)
}
}

}
}

func match(x, y Val) (Val, Val) {
switch {
case x.Ctype() == CTCPLX || y.Ctype() == CTCPLX:
return tocplx(x), tocplx(y)
case x.Ctype() == CTFLT || y.Ctype() == CTFLT:
return toflt(x), toflt(y)
}

// Mixed int/rune are fine.
return x, y
}

func compareOp(x Val, op Op, y Val) bool {
x, y = match(x, y)

switch x.Ctype() {
case CTNIL:
_, _ = x.U.(*NilVal), y.U.(*NilVal) // assert dynamic types match
switch op {
case OEQ:
return true
case ONE:
return false
}

case CTBOOL:
x, y := x.U.(bool), y.U.(bool)
switch op {
case OEQ:
return x == y
case ONE:
return x != y
}

case CTINT, CTRUNE:
x, y := x.U.(*Mpint), y.U.(*Mpint)
return cmpZero(x.Cmp(y), op)

case CTFLT:
x, y := x.U.(*Mpflt), y.U.(*Mpflt)
return cmpZero(x.Cmp(y), op)

case CTCPLX:
x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
switch op {
case OEQ:
return eq
case ONE:
return !eq
}

case CTSTR:
x, y := x.U.(string), y.U.(string)
switch op {
case OEQ:
return x == y
case ONE:
return x != y
case OLT:
return x < y
case OLE:
return x <= y
case OGT:
return x > y
case OGE:
return x >= y
}
}

Fatalf("compareOp: bad comparison: %v %v %v", x, op, y)
panic("unreachable")
}

func cmpZero(x int, op Op) bool {
switch op {
case OEQ:
return x == 0
case ONE:
return x != 0
case OLT:
return x < 0
case OLE:
return x <= 0
case OGT:
return x > 0
case OGE:
return x >= 0
}

Fatalf("cmpZero: want comparison operator, got %v", op)
panic("unreachable")
}

func binaryOp(x Val, op Op, y Val) Val {
x, y = match(x, y)

Outer:
switch x.Ctype() {
case CTBOOL:
x, y := x.U.(bool), y.U.(bool)
switch op {
case OANDAND:
return Val{U: x && y}
case OOROR:
return Val{U: x || y}
}

case CTINT, CTRUNE:
x, y := x.U.(*Mpint), y.U.(*Mpint)

u := new(Mpint)
u.Rune = x.Rune || y.Rune
u.Set(x)
switch op {
case OADD:
u.Add(y)
case OSUB:
u.Sub(y)
case OMUL:
u.Mul(y)
case ODIV:
if y.CmpInt64(0) == 0 {
yyerror("division by zero")
u.SetOverflow()
break
}
u.Quo(y)
case OMOD:
if y.CmpInt64(0) == 0 {
yyerror("division by zero")
u.SetOverflow()
break
}
u.Rem(y)
case OOR:
u.Or(y)
case OAND:
u.And(y)
case OANDNOT:
u.AndNot(y)
case OXOR:
u.Xor(y)
default:
break Outer
}
return Val{U: u}

case CTFLT:
x, y := x.U.(*Mpflt), y.U.(*Mpflt)

u := newMpflt()
u.Set(x)
switch op {
case OADD:
u.Add(y)
case OSUB:
u.Sub(y)
case OMUL:
u.Mul(y)
case ODIV:
if y.CmpFloat64(0) == 0 {
yyerror("division by zero")
u.SetFloat64(1)
break
}
u.Quo(y)
case OMOD:
// TODO(mdempsky): Move to typecheck.
yyerror("illegal constant expression: floating-point %% operation")
default:
break Outer
}
return Val{U: u}

case CTCPLX:
x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)

u := new(Mpcplx)
u.Real.Set(&x.Real)
u.Imag.Set(&x.Imag)
switch op {
case OADD:
u.Real.Add(&y.Real)
u.Imag.Add(&y.Imag)
case OSUB:
u.Real.Sub(&y.Real)
u.Imag.Sub(&y.Imag)
case OMUL:
u.Mul(y)
case ODIV:
if !u.Div(y) {
yyerror("complex division by zero")
u.Real.SetFloat64(1)
u.Imag.SetFloat64(0)
}
default:
break Outer
}
return Val{U: u}
}

Fatalf("binaryOp: bad operation: %v %v %v", x, op, y)
panic("unreachable")
}

func unaryOp(op Op, x Val, t *types.Type) Val {
switch op {
case OPLUS:
switch x.Ctype() {
case CTINT, CTRUNE, CTFLT, CTCPLX:
return x
}

case OMINUS:
switch x.Ctype() {
case CTINT, CTRUNE:
x := x.U.(*Mpint)
u := new(Mpint)
u.Rune = x.Rune
u.Set(x)
u.Neg()
return Val{U: u}

case CTFLT:
x := x.U.(*Mpflt)
u := newMpflt()
u.Set(x)
u.Neg()
return Val{U: u}

case CTCPLX:
x := x.U.(*Mpcplx)
u := new(Mpcplx)
u.Real.Set(&x.Real)
u.Imag.Set(&x.Imag)
u.Real.Neg()
u.Imag.Neg()
return Val{U: u}
}
case OCOM:
x := x.U.(*Mpint)
u := new(Mpint)
u.Rune = x.Rune
if t.IsSigned() || t.IsUntyped() {
// Signed values change sign.
u.SetInt64(-1)
} else {
// Unsigned values invert their bits.
u.Set(maxintval[t.Etype])
}
u.Xor(x)
return Val{U: u}
case ONOT:
return Val{U: !x.U.(bool)}
}
Fatalf("unaryOp: bad operation: %v %v", op, x)
panic("unreachable")
}
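The OCOM branch above relies on the identity ^x == mask^x, with mask = -1 for signed (and untyped) values and the type's maximum value for unsigned ones. A small standalone check of that identity in ordinary Go:

	package main

	import "fmt"

	func main() {
		var s int8 = 5
		fmt.Println(^s == -1^s) // true: signed complement is XOR with -1
		var u uint8 = 5
		fmt.Println(^u == 0xFF^u) // true: unsigned complement is XOR with the max value
	}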
func shiftOp(x Val, op Op, y Val) Val {
if x.Ctype() != CTRUNE {
x = toint(x)
}
y = toint(y)
u := new(Mpint)
u.Set(x.U.(*Mpint))
u.Rune = x.U.(*Mpint).Rune
switch op {
case OLSH:
u.Lsh(y.U.(*Mpint))
case ORSH:
u.Rsh(y.U.(*Mpint))
default:
Fatalf("shiftOp: bad operator: %v", op)
panic("unreachable")
}
return Val{U: u}
} }
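Taken together, these helpers let evconst fold the usual constant forms; an illustrative set, annotated with the helper that would handle each one under this refactor:

	const (
		a = 3 + 4*5         // binaryOp: 23
		b = -a              // unaryOp: -23
		c = 1 << 10         // shiftOp: 1024
		d = "go" < "gopher" // compareOp: true
	)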
// setconst rewrites n as an OLITERAL with value v.
@ -1223,6 +972,10 @@ func setconst(n *Node, v Val) {
}
}
func setboolconst(n *Node, v bool) {
setconst(n, Val{U: v})
}
func setintconst(n *Node, v int64) {
u := new(Mpint)
u.SetInt64(v)
@ -1305,9 +1058,7 @@ func idealkind(n *Node) Ctype {
OLT,
ONE,
ONOT,
OOROR:
OOROR,
OCMPSTR,
OCMPIFACE:
return CTBOOL

// shifts (beware!).
@ -1479,11 +1230,10 @@ func smallintconst(n *Node) bool {
TUINT16,
TINT32,
TUINT32,
TBOOL:
TBOOL,
TPTR32:
return true

case TIDEAL, TINT64, TUINT64, TPTR64:
case TIDEAL, TINT64, TUINT64, TPTR:
v, ok := n.Val().U.(*Mpint)
if ok && v.Cmp(minintval[TINT32]) > 0 && v.Cmp(maxintval[TINT32]) < 0 {
return true


@ -929,7 +929,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F
}

// eqtype only checks that incoming and result parameters match,
// so explicitly check that the receiver parameters match too.
if !eqtype(t, f.Type) || !eqtype(t.Recv().Type, f.Type.Recv().Type) {
if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t) yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
} }
return f return f


@ -798,9 +798,8 @@ func (e *EscState) esc(n *Node, parent *Node) {
// gathered here.
if n.Esc != EscHeap && n.Type != nil &&
(n.Type.Width > maxStackVarSize ||
(n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= 1<<16 ||
(n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize ||
n.Op == OMAKESLICE && !isSmallMakeSlice(n)) {
// isSmallMakeSlice returns false for non-constant len/cap.
// If that's the case, print a more accurate escape reason.
var msgVerb, escapeMsg string
@ -873,7 +872,7 @@ opSwitch:
// it is also a dereference, because it is implicitly
// dereferenced (see #12588)
if n.Type.IsArray() &&
!(n.Right.Type.IsPtr() && eqtype(n.Right.Type.Elem(), n.Type)) {
!(n.Right.Type.IsPtr() && types.Identical(n.Right.Type.Elem(), n.Type)) {
e.escassignWhyWhere(n.List.Second(), n.Right, "range", n)
} else {
e.escassignDereference(n.List.Second(), n.Right, e.stepAssignWhere(n.List.Second(), n.Right, "range-deref", n))
@ -946,7 +945,8 @@ opSwitch:
case OCALLMETH, OCALLFUNC, OCALLINTER:
e.esccall(n, parent)

// esccall already done on n.Rlist.First(). tie it's Retval to n.List
// esccall already done on n.Rlist.First()
// tie its Retval to n.List
case OAS2FUNC: // x,y = f()
rs := e.nodeEscState(n.Rlist.First()).Retval.Slice()
where := n
@ -1507,7 +1507,7 @@ func (e *EscState) addDereference(n *Node) *Node {
e.nodeEscState(ind).Loopdepth = e.nodeEscState(n).Loopdepth
ind.Pos = n.Pos
t := n.Type
if t.IsKind(types.Tptr) || t.IsSlice() {
if t.IsPtr() || t.IsSlice() {
// This should model our own sloppy use of OIND to encode
// decreasing levels of indirection; i.e., "indirecting" a slice
// yields the type of an element.


@ -131,7 +131,7 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
n := importsym(ipkg, s, op)
if n.Op != ONONAME {
if n.Op == op && (n.Class() != ctxt || !eqtype(n.Type, t)) {
if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return nil


@ -697,7 +697,7 @@ func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
}

switch t.Etype {
case TPTR32, TPTR64:
case TPTR:
switch mode {
case FTypeId, FTypeIdName:
if flag&FmtShort != 0 {
@ -1146,8 +1146,6 @@ var opprec = []int{
OGE: 4,
OGT: 4,
ONE: 4,
OCMPSTR: 4,
OCMPIFACE: 4,
OSEND: 3,
OANDAND: 2,
OOROR: 1,
@ -1507,11 +1505,6 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
n1.exprfmt(s, nprec, mode)
}
case OCMPSTR, OCMPIFACE:
n.Left.exprfmt(s, nprec, mode)
mode.Fprintf(s, " %#v ", n.SubOp())
n.Right.exprfmt(s, nprec+1, mode)
default:
mode.Fprintf(s, "<node %v>", n.Op)
}


@ -14,7 +14,17 @@ import (
const (
BADWIDTH = types.BADWIDTH
// maximum size variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
maxStackVarSize = 10 * 1024 * 1024
// maximum size of implicit variables that we will allocate on the stack.
// p := new(T) allocating T on the stack
// p := &T{} allocating T on the stack
// s := make([]T, n) allocating [n]T on the stack
// s := []byte("...") allocating [n]byte on the stack
maxImplicitStackVarSize = 64 * 1024
)
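A hedged sketch of which limit applies where; actual placement also depends on escape analysis, so the comments describe eligibility, not guarantees:

	func f() {
		var x [8 << 20]byte       // explicit declaration: subject to the 10MB maxStackVarSize
		p := new([32 << 10]byte)  // implicit allocation under 64KB: may stay on the stack
		q := new([128 << 10]byte) // implicit allocation over 64KB: moved to the heap
		_, _, _ = x[0], p, q
	}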
// isRuntimePkg reports whether p is package runtime.
@ -82,7 +92,6 @@ var pragcgobuf [][]string
var outfile string
var linkobj string
var dolinkobj bool
// nerrors is the number of compiler errors reported
// since the last call to saveerrors.
@ -96,8 +105,6 @@ var nsyntaxerrors int
var decldepth int32
var safemode bool
var nolocalimports bool

var Debug [256]int
@ -201,8 +208,6 @@ var compiling_runtime bool
// Compiling the standard library
var compiling_std bool
var compiling_wrappers bool
var use_writebarrier bool

var pure_go bool


@ -617,7 +617,7 @@ func (w *exportWriter) doTyp(t *types.Type) {
}

switch t.Etype {
case TPTR32, TPTR64:
case TPTR:
w.startType(pointerType)
w.typ(t.Elem())
@ -743,7 +743,7 @@ func constTypeOf(typ *types.Type) Ctype {
return CTSTR
case TINT, TINT8, TINT16, TINT32, TINT64,
TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR,
TPTR32, TPTR64, TUNSAFEPTR:
TPTR, TUNSAFEPTR:
return CTINT
case TFLOAT32, TFLOAT64:
return CTFLT
@ -1319,12 +1319,6 @@ func (w *exportWriter) expr(n *Node) {
w.pos(n.Pos)
w.exprList(n.List)
case OCMPSTR, OCMPIFACE:
w.op(n.SubOp())
w.pos(n.Pos)
w.expr(n.Left)
w.expr(n.Right)
case ODCLCONST:
// if exporting, DCLCONST should just be removed as its usage
// has already been replaced with literals


@ -935,9 +935,6 @@ func (r *importReader) node() *Node {
}
return x
// case OCMPSTR, OCMPIFACE:
// unreachable - mapped to std comparison operators by exporter
// --------------------------------------------------------------------
// statements
case ODCL:


@ -87,9 +87,6 @@ func typecheckinl(fn *Node) {
fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body)) fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
} }
save_safemode := safemode
safemode = false
savefn := Curfn
Curfn = fn
typecheckslice(fn.Func.Inl.Body, Etop)
@ -102,8 +99,6 @@ func typecheckinl(fn *Node) {
fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...)
fn.Func.Dcl = nil
safemode = save_safemode
lineno = lno
}
@ -404,16 +399,6 @@ func (v *hairyVisitor) visit(n *Node) bool {
}
v.budget--
// TODO(mdempsky/josharian): Hacks to appease toolstash; remove.
// See issue 17566 and CL 31674 for discussion.
switch n.Op {
case OSTRUCTKEY:
v.budget--
case OSLICE, OSLICEARR, OSLICESTR:
v.budget--
case OSLICE3, OSLICE3ARR:
v.budget -= 2
}
// When debugging, don't stop early, to get full cost of inlining this function
if v.budget < 0 && Debug['m'] < 2 {
@ -813,23 +798,6 @@ func (v *reassignVisitor) visitList(l Nodes) *Node {
return nil
}
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(n *Node, fn *Node, maxCost int32) *Node {
save_safemode := safemode
// imported functions may refer to unsafe as long as the
// package was marked safe during import (already checked).
pkg := fnpkg(fn)
if pkg != localpkg && pkg != nil {
safemode = false
}
n = mkinlcall1(n, fn, maxCost)
safemode = save_safemode
return n
}
func tinlvar(t *types.Field, inlvars map[*Node]*Node) *Node {
if n := asNode(t.Nname); n != nil && !n.isBlank() {
inlvar := inlvars[n]
@ -849,9 +817,9 @@ var inlgen int
// On return ninit has the parameter assignments, the nbody is the
// inlined function body and list, rlist contain the input, output
// parameters.
// The result of mkinlcall1 MUST be assigned back to n, e.g. // The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall1(n.Left, fn, isddd) // n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall1(n, fn *Node, maxCost int32) *Node { func mkinlcall(n, fn *Node, maxCost int32) *Node {
if fn.Func.Inl == nil { if fn.Func.Inl == nil {
// No inlinable body. // No inlinable body.
return n return n

View file

@ -26,7 +26,8 @@ func TestIntendedInlining(t *testing.T) {
t.Parallel() t.Parallel()
// want is the list of function names (by package) that should // want is the list of function names (by package) that should
// be inlined. // be inlinable. If they have no callers in their packages, they
// might not actually be inlined anywhere.
want := map[string][]string{ want := map[string][]string{
"runtime": { "runtime": {
// TODO(mvdan): enable these once mid-stack // TODO(mvdan): enable these once mid-stack
@ -53,7 +54,6 @@ func TestIntendedInlining(t *testing.T) {
"getm", "getm",
"isDirectIface", "isDirectIface",
"itabHashFunc", "itabHashFunc",
"maxSliceCap",
"noescape", "noescape",
"readUnaligned32", "readUnaligned32",
"readUnaligned64", "readUnaligned64",
@ -96,6 +96,9 @@ func TestIntendedInlining(t *testing.T) {
"(*puintptr).set", "(*puintptr).set",
}, },
"runtime/internal/sys": {}, "runtime/internal/sys": {},
"runtime/internal/math": {
"MulUintptr",
},
"bytes": { "bytes": {
"(*Buffer).Bytes", "(*Buffer).Bytes",
"(*Buffer).Cap", "(*Buffer).Cap",
@ -108,6 +111,11 @@ func TestIntendedInlining(t *testing.T) {
"(*Buffer).UnreadByte", "(*Buffer).UnreadByte",
"(*Buffer).tryGrowByReslice", "(*Buffer).tryGrowByReslice",
}, },
"compress/flate": {
"byLiteral.Len",
"byLiteral.Less",
"byLiteral.Swap",
},
"unicode/utf8": { "unicode/utf8": {
"FullRune", "FullRune",
"FullRuneInString", "FullRuneInString",
@ -159,6 +167,13 @@ func TestIntendedInlining(t *testing.T) {
want["runtime"] = append(want["runtime"], "rotl_31") want["runtime"] = append(want["runtime"], "rotl_31")
} }
// Functions that must actually be inlined; they must have real callers.
must := map[string]bool{
"compress/flate.byLiteral.Len": true,
"compress/flate.byLiteral.Less": true,
"compress/flate.byLiteral.Swap": true,
}
notInlinedReason := make(map[string]string) notInlinedReason := make(map[string]string)
pkgs := make([]string, 0, len(want)) pkgs := make([]string, 0, len(want))
for pname, fnames := range want { for pname, fnames := range want {
@ -185,6 +200,7 @@ func TestIntendedInlining(t *testing.T) {
scanner := bufio.NewScanner(pr) scanner := bufio.NewScanner(pr)
curPkg := "" curPkg := ""
canInline := regexp.MustCompile(`: can inline ([^ ]*)`) canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`)
cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`) cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`)
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
@ -192,11 +208,20 @@ func TestIntendedInlining(t *testing.T) {
curPkg = line[2:] curPkg = line[2:]
continue continue
} }
if m := canInline.FindStringSubmatch(line); m != nil { if m := haveInlined.FindStringSubmatch(line); m != nil {
fname := m[1] fname := m[1]
delete(notInlinedReason, curPkg+"."+fname) delete(notInlinedReason, curPkg+"."+fname)
continue continue
} }
if m := canInline.FindStringSubmatch(line); m != nil {
fname := m[1]
fullname := curPkg + "." + fname
// If a function must be inlined somewhere, being inlinable is not enough.
if _, ok := must[fullname]; !ok {
delete(notInlinedReason, fullname)
continue
}
}
if m := cannotInline.FindStringSubmatch(line); m != nil { if m := cannotInline.FindStringSubmatch(line); m != nil {
fname, reason := m[1], m[2] fname, reason := m[1], m[2]
fullName := curPkg + "." + fname fullName := curPkg + "." + fname

View file

@ -207,7 +207,6 @@ func Main(archInit func(*Arch)) {
objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e']) objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
objabi.Flagcount("f", "debug stack frames", &Debug['f']) objabi.Flagcount("f", "debug stack frames", &Debug['f'])
objabi.Flagcount("h", "halt on error", &Debug['h']) objabi.Flagcount("h", "halt on error", &Debug['h'])
objabi.Flagcount("i", "debug line number stack", &Debug['i'])
objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap) objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg) objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`") flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
@ -219,7 +218,6 @@ func Main(archInit func(*Arch)) {
if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) { if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer") flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
} }
flag.BoolVar(&dolinkobj, "dolinkobj", true, "generate linker-specific objects; if false, some invalid code may compile")
flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports") flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports")
flag.StringVar(&outfile, "o", "", "write output to `file`") flag.StringVar(&outfile, "o", "", "write output to `file`")
flag.StringVar(&myimportpath, "p", "", "set expected package import `path`") flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
@ -230,7 +228,6 @@ func Main(archInit func(*Arch)) {
} }
objabi.Flagcount("s", "warn about composite literals that can be simplified", &Debug['s']) objabi.Flagcount("s", "warn about composite literals that can be simplified", &Debug['s'])
flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths") flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
flag.BoolVar(&safemode, "u", false, "reject unsafe code")
flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity") flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity")
objabi.Flagcount("w", "debug type checking", &Debug['w']) objabi.Flagcount("w", "debug type checking", &Debug['w'])
flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier") flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
@ -535,7 +532,9 @@ func Main(archInit func(*Arch)) {
fcount++ fcount++
} }
} }
// With all types checked, it's now safe to verify map keys. // With all types checked, it's now safe to verify map keys. One single
// check past phase 9 isn't sufficient, as we may exit with other errors
// before then, thus skipping map key errors.
checkMapKeys() checkMapKeys()
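A hedged example of the kind of error this deferred check catches: a map type whose key only later turns out to be non-comparable.

	type M map[K]bool         // K is not fully typechecked yet here
	type K struct{ f func() } // func fields make K an invalid map key; reported by checkMapKeys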
timings.AddEvent(fcount, "funcs") timings.AddEvent(fcount, "funcs")
@ -605,7 +604,6 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "escapes") timings.Start("fe", "escapes")
escapes(xtop) escapes(xtop)
if dolinkobj {
// Collect information for go:nowritebarrierrec // Collect information for go:nowritebarrierrec
// checking. This must happen before transformclosure. // checking. This must happen before transformclosure.
// We'll do the final check after write barriers are // We'll do the final check after write barriers are
@ -670,7 +668,6 @@ func Main(archInit func(*Arch)) {
Ctxt.DwFixups = nil Ctxt.DwFixups = nil
genDwarfInline = 0 genDwarfInline = 0
} }
}
// Phase 9: Check external declarations. // Phase 9: Check external declarations.
timings.Start("be", "externaldcls") timings.Start("be", "externaldcls")
@ -679,6 +676,9 @@ func Main(archInit func(*Arch)) {
externdcl[i] = typecheck(externdcl[i], Erv) externdcl[i] = typecheck(externdcl[i], Erv)
} }
} }
// Check the map keys again, since we typechecked the external
// declarations.
checkMapKeys()
if nerrors+nsavederrors != 0 { if nerrors+nsavederrors != 0 {
errorexit() errorexit()
@ -839,7 +839,7 @@ func islocalname(name string) bool {
func findpkg(name string) (file string, ok bool) { func findpkg(name string) (file string, ok bool) {
if islocalname(name) { if islocalname(name) {
if safemode || nolocalimports { if nolocalimports {
return "", false return "", false
} }
@ -981,11 +981,6 @@ func importfile(f *Val) *types.Pkg {
} }
if path_ == "unsafe" { if path_ == "unsafe" {
if safemode {
yyerror("cannot import package unsafe")
errorexit()
}
imported_unsafe = true imported_unsafe = true
return unsafepkg return unsafepkg
} }
@ -1059,7 +1054,6 @@ func importfile(f *Val) *types.Pkg {
} }
// process header lines // process header lines
safe := false
for { for {
p, err = imp.ReadString('\n') p, err = imp.ReadString('\n')
if err != nil { if err != nil {
@ -1069,13 +1063,6 @@ func importfile(f *Val) *types.Pkg {
if p == "\n" { if p == "\n" {
break // header ends with blank line break // header ends with blank line
} }
if strings.HasPrefix(p, "safe") {
safe = true
break // ok to ignore rest
}
}
if safemode && !safe {
yyerror("cannot import unsafe package %q", importpkg.Path)
} }
// assume files move (get installed) so don't record the full path // assume files move (get installed) so don't record the full path

View file

@ -43,10 +43,6 @@ const (
) )
func dumpobj() { func dumpobj() {
if !dolinkobj {
dumpobj1(outfile, modeCompilerObj)
return
}
if linkobj == "" { if linkobj == "" {
dumpobj1(outfile, modeCompilerObj|modeLinkerObj) dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
return return
@ -85,11 +81,6 @@ func printObjHeader(bout *bio.Writer) {
if localpkg.Name == "main" { if localpkg.Name == "main" {
fmt.Fprintf(bout, "main\n") fmt.Fprintf(bout, "main\n")
} }
if safemode {
fmt.Fprintf(bout, "safe\n")
} else {
fmt.Fprintf(bout, "----\n") // room for some other tool to write "safe"
}
fmt.Fprintf(bout, "\n") // header ends with blank line fmt.Fprintf(bout, "\n") // header ends with blank line
} }

View file

@ -4,9 +4,9 @@ package gc
import "strconv" import "strconv"
const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDARRAYBYTESTRARRAYBYTESTRTMPARRAYRUNESTRSTRARRAYBYTESTRARRAYBYTETMPSTRARRAYRUNEASAS2AS2FUNCAS2RECVAS2MAPRAS2DOTTYPEASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECMPIFACECMPSTRCOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTINDINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMULDIVMODLSHRSHANDANDNOTNEWNOTCOMPLUSMINUSORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASEXCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELPROCRANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDDDDARGINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVEINDREGSPRETJMPGETGEND" const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDARRAYBYTESTRARRAYBYTESTRTMPARRAYRUNESTRSTRARRAYBYTESTRARRAYBYTETMPSTRARRAYRUNEASAS2AS2FUNCAS2RECVAS2MAPRAS2DOTTYPEASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTINDINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMULDIVMODLSHRSHANDANDNOTNEWNOTCOMPLUSMINUSORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASEXCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELPROCRANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDDDDARGINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVEINDREGSPRETJMPGETGEND"
var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 73, 88, 100, 112, 127, 139, 141, 144, 151, 158, 165, 175, 179, 183, 191, 199, 208, 216, 219, 224, 231, 239, 245, 252, 258, 267, 275, 283, 289, 293, 302, 309, 313, 316, 323, 331, 339, 346, 352, 355, 361, 368, 376, 380, 387, 395, 397, 399, 401, 403, 405, 407, 410, 415, 423, 426, 435, 438, 442, 450, 457, 466, 469, 472, 475, 478, 481, 484, 490, 493, 496, 499, 503, 508, 512, 517, 522, 528, 533, 537, 542, 550, 558, 564, 573, 580, 584, 591, 598, 606, 610, 614, 618, 625, 632, 640, 646, 651, 656, 660, 665, 673, 678, 683, 687, 690, 698, 702, 704, 709, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 778, 784, 791, 796, 800, 805, 809, 819, 824, 832, 838, 845, 852, 860, 866, 870, 873} var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 73, 88, 100, 112, 127, 139, 141, 144, 151, 158, 165, 175, 179, 183, 191, 199, 208, 216, 219, 224, 231, 238, 244, 253, 261, 269, 275, 279, 288, 295, 299, 302, 309, 317, 325, 332, 338, 341, 347, 354, 362, 366, 373, 381, 383, 385, 387, 389, 391, 393, 396, 401, 409, 412, 421, 424, 428, 436, 443, 452, 455, 458, 461, 464, 467, 470, 476, 479, 482, 485, 489, 494, 498, 503, 508, 514, 519, 523, 528, 536, 544, 550, 559, 566, 570, 577, 584, 592, 596, 600, 604, 611, 618, 626, 632, 637, 642, 646, 651, 659, 664, 669, 673, 676, 684, 688, 690, 695, 699, 704, 710, 716, 722, 728, 733, 737, 744, 750, 755, 761, 764, 770, 777, 782, 786, 791, 795, 805, 810, 818, 824, 831, 838, 846, 852, 856, 859}
func (i Op) String() string { func (i Op) String() string {
if i >= Op(len(_Op_index)-1) { if i >= Op(len(_Op_index)-1) {

View file

@ -44,6 +44,7 @@ import (
type Order struct { type Order struct {
out []*Node // list of generated statements out []*Node // list of generated statements
temp []*Node // stack of temporary variables temp []*Node // stack of temporary variables
free map[string][]*Node // free list of unused temporaries, by type.LongString().
} }
// Order rewrites fn.Nbody to apply the ordering constraints // Order rewrites fn.Nbody to apply the ordering constraints
@ -54,14 +55,30 @@ func order(fn *Node) {
dumplist(s, fn.Nbody) dumplist(s, fn.Nbody)
} }
orderBlock(&fn.Nbody) orderBlock(&fn.Nbody, map[string][]*Node{})
} }
// newTemp allocates a new temporary with the given type, // newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it. // pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary. // If clear is true, newTemp emits code to zero the temporary.
func (o *Order) newTemp(t *types.Type, clear bool) *Node { func (o *Order) newTemp(t *types.Type, clear bool) *Node {
v := temp(t) var v *Node
// Note: LongString is close to the type equality we want,
// but not exactly. We still need to double-check with types.Identical.
key := t.LongString()
a := o.free[key]
for i, n := range a {
if types.Identical(t, n.Type) {
v = a[i]
a[i] = a[len(a)-1]
a = a[:len(a)-1]
o.free[key] = a
break
}
}
if v == nil {
v = temp(t)
}
if clear { if clear {
a := nod(OAS, v, nil) a := nod(OAS, v, nil)
a = typecheck(a, Etop) a = typecheck(a, Etop)
@ -216,6 +233,45 @@ func (o *Order) mapKeyTemp(t *types.Type, n *Node) *Node {
return n return n
} }
// mapKeyReplaceStrConv replaces OARRAYBYTESTR by OARRAYBYTESTRTMP
// in n to avoid string allocations for keys in map lookups.
// Returns a bool that signals if a modification was made.
//
// For:
// x = m[string(k)]
// x = m[T1{... Tn{..., string(k), ...}}]
// where k is []byte, T1 to Tn is a nesting of struct and array literals,
// the allocation of backing bytes for the string can be avoided
// by reusing the []byte backing array. These are special cases
// for avoiding allocations when converting byte slices to strings.
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
func mapKeyReplaceStrConv(n *Node) bool {
var replaced bool
switch n.Op {
case OARRAYBYTESTR:
n.Op = OARRAYBYTESTRTMP
replaced = true
case OSTRUCTLIT:
for _, elem := range n.List.Slice() {
if mapKeyReplaceStrConv(elem.Left) {
replaced = true
}
}
case OARRAYLIT:
for _, elem := range n.List.Slice() {
if elem.Op == OKEY {
elem = elem.Right
}
if mapKeyReplaceStrConv(elem) {
replaced = true
}
}
}
return replaced
}
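A hedged illustration of the lookups this rewrite targets; in both forms the []byte-to-string conversion in the key position can reuse k's backing array instead of allocating:

	k := []byte("key")
	x := m[string(k)] // simple case; m is a map[string]int
	type T struct{ s string }
	y := n[T{string(k)}] // conversion nested in a struct literal; n is a map[T]int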
type ordermarker int type ordermarker int
// Marktemp returns the top of the temporary variable stack. // Marktemp returns the top of the temporary variable stack.
@ -226,6 +282,10 @@ func (o *Order) markTemp() ordermarker {
// Poptemp pops temporaries off the stack until reaching the mark, // Poptemp pops temporaries off the stack until reaching the mark,
// which must have been returned by marktemp. // which must have been returned by marktemp.
func (o *Order) popTemp(mark ordermarker) { func (o *Order) popTemp(mark ordermarker) {
for _, n := range o.temp[mark:] {
key := n.Type.LongString()
o.free[key] = append(o.free[key], n)
}
o.temp = o.temp[:mark] o.temp = o.temp[:mark]
} }
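A hedged sketch of the pool's lifecycle around a single statement: temporaries pushed after the mark are recycled into o.free, where a later newTemp call with an identical type can pick them up.

	mark := o.markTemp() // remember the temp-stack depth
	o.stmt(n)            // newTemp may reuse a free temp or allocate a fresh one
	o.popTemp(mark)      // temps above mark move to o.free for reuse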
@ -266,8 +326,10 @@ func (o *Order) stmtList(l Nodes) {
// orderBlock orders the block of statements in n into a new slice, // orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice. // and then replaces the old slice in n with the new slice.
func orderBlock(n *Nodes) { // free is a map that can be used to obtain temporary variables by type.
func orderBlock(n *Nodes, free map[string][]*Node) {
var order Order var order Order
order.free = free
mark := order.markTemp() mark := order.markTemp()
order.stmtList(*n) order.stmtList(*n)
order.cleanTemp(mark) order.cleanTemp(mark)
@ -280,6 +342,7 @@ func orderBlock(n *Nodes) {
// n.Left = o.exprInPlace(n.Left) // n.Left = o.exprInPlace(n.Left)
func (o *Order) exprInPlace(n *Node) *Node { func (o *Order) exprInPlace(n *Node) *Node {
var order Order var order Order
order.free = o.free
n = order.expr(n, nil) n = order.expr(n, nil)
n = addinit(n, order.out) n = addinit(n, order.out)
@ -293,8 +356,10 @@ func (o *Order) exprInPlace(n *Node) *Node {
// and replaces it with the resulting statement list. // and replaces it with the resulting statement list.
// The result of orderStmtInPlace MUST be assigned back to n, e.g. // The result of orderStmtInPlace MUST be assigned back to n, e.g.
// n.Left = orderStmtInPlace(n.Left) // n.Left = orderStmtInPlace(n.Left)
func orderStmtInPlace(n *Node) *Node { // free is a map that can be used to obtain temporary variables by type.
func orderStmtInPlace(n *Node, free map[string][]*Node) *Node {
var order Order var order Order
order.free = free
mark := order.markTemp() mark := order.markTemp()
order.stmt(n) order.stmt(n)
order.cleanTemp(mark) order.cleanTemp(mark)
@ -341,11 +406,13 @@ func (o *Order) copyRet(n *Node) []*Node {
Fatalf("copyret %v %d", n.Type, n.Left.Type.NumResults()) Fatalf("copyret %v %d", n.Type, n.Left.Type.NumResults())
} }
var l1, l2 []*Node slice := n.Type.Fields().Slice()
for _, f := range n.Type.Fields().Slice() { l1 := make([]*Node, len(slice))
tmp := temp(f.Type) l2 := make([]*Node, len(slice))
l1 = append(l1, tmp) for i, t := range slice {
l2 = append(l2, tmp) tmp := temp(t.Type)
l1[i] = tmp
l2[i] = tmp
} }
as := nod(OAS2, nil, nil) as := nod(OAS2, nil, nil)
@ -554,10 +621,9 @@ func (o *Order) stmt(n *Node) {
r.Left = o.expr(r.Left, nil) r.Left = o.expr(r.Left, nil)
r.Right = o.expr(r.Right, nil) r.Right = o.expr(r.Right, nil)
// See case OINDEXMAP below. // See similar conversion for OINDEXMAP below.
if r.Right.Op == OARRAYBYTESTR { _ = mapKeyReplaceStrConv(r.Right)
r.Right.Op = OARRAYBYTESTRTMP
}
r.Right = o.mapKeyTemp(r.Left.Type, r.Right) r.Right = o.mapKeyTemp(r.Left.Type, r.Right)
o.okAs2(n) o.okAs2(n)
o.cleanTemp(t) o.cleanTemp(t)
@ -643,8 +709,8 @@ func (o *Order) stmt(n *Node) {
t := o.markTemp() t := o.markTemp()
n.Left = o.exprInPlace(n.Left) n.Left = o.exprInPlace(n.Left)
n.Nbody.Prepend(o.cleanTempNoPop(t)...) n.Nbody.Prepend(o.cleanTempNoPop(t)...)
orderBlock(&n.Nbody) orderBlock(&n.Nbody, o.free)
n.Right = orderStmtInPlace(n.Right) n.Right = orderStmtInPlace(n.Right, o.free)
o.out = append(o.out, n) o.out = append(o.out, n)
o.cleanTemp(t) o.cleanTemp(t)
@ -656,8 +722,8 @@ func (o *Order) stmt(n *Node) {
n.Nbody.Prepend(o.cleanTempNoPop(t)...) n.Nbody.Prepend(o.cleanTempNoPop(t)...)
n.Rlist.Prepend(o.cleanTempNoPop(t)...) n.Rlist.Prepend(o.cleanTempNoPop(t)...)
o.popTemp(t) o.popTemp(t)
orderBlock(&n.Nbody) orderBlock(&n.Nbody, o.free)
orderBlock(&n.Rlist) orderBlock(&n.Rlist, o.free)
o.out = append(o.out, n) o.out = append(o.out, n)
// Special: argument will be converted to interface using convT2E // Special: argument will be converted to interface using convT2E
@ -739,7 +805,7 @@ func (o *Order) stmt(n *Node) {
} }
o.exprListInPlace(n.List) o.exprListInPlace(n.List)
if orderBody { if orderBody {
orderBlock(&n.Nbody) orderBlock(&n.Nbody, o.free)
} }
o.out = append(o.out, n) o.out = append(o.out, n)
o.cleanTemp(t) o.cleanTemp(t)
@ -857,7 +923,7 @@ func (o *Order) stmt(n *Node) {
tmp2 = typecheck(tmp2, Etop) tmp2 = typecheck(tmp2, Etop)
n2.Ninit.Append(tmp2) n2.Ninit.Append(tmp2)
} }
orderBlock(&n2.Ninit) orderBlock(&n2.Ninit, o.free)
case OSEND: case OSEND:
if r.Ninit.Len() != 0 { if r.Ninit.Len() != 0 {
@ -882,7 +948,7 @@ func (o *Order) stmt(n *Node) {
// Also insert any ninit queued during the previous loop. // Also insert any ninit queued during the previous loop.
// (The temporary cleaning must follow that ninit work.) // (The temporary cleaning must follow that ninit work.)
for _, n3 := range n.List.Slice() { for _, n3 := range n.List.Slice() {
orderBlock(&n3.Nbody) orderBlock(&n3.Nbody, o.free)
n3.Nbody.Prepend(o.cleanTempNoPop(t)...) n3.Nbody.Prepend(o.cleanTempNoPop(t)...)
// TODO(mdempsky): Is this actually necessary? // TODO(mdempsky): Is this actually necessary?
@ -924,7 +990,7 @@ func (o *Order) stmt(n *Node) {
Fatalf("order switch case %v", ncas.Op) Fatalf("order switch case %v", ncas.Op)
} }
o.exprListInPlace(ncas.List) o.exprListInPlace(ncas.List)
orderBlock(&ncas.Nbody) orderBlock(&ncas.Nbody, o.free)
} }
o.out = append(o.out, n) o.out = append(o.out, n)
@ -1010,45 +1076,24 @@ func (o *Order) expr(n, lhs *Node) *Node {
} }
} }
case OCMPSTR:
n.Left = o.expr(n.Left, nil)
n.Right = o.expr(n.Right, nil)
// Mark string(byteSlice) arguments to reuse byteSlice backing
// buffer during conversion. String comparison does not
// retain the strings for later use, so it is safe.
if n.Left.Op == OARRAYBYTESTR {
n.Left.Op = OARRAYBYTESTRTMP
}
if n.Right.Op == OARRAYBYTESTR {
n.Right.Op = OARRAYBYTESTRTMP
}
// key must be addressable // key must be addressable
case OINDEXMAP: case OINDEXMAP:
n.Left = o.expr(n.Left, nil) n.Left = o.expr(n.Left, nil)
n.Right = o.expr(n.Right, nil) n.Right = o.expr(n.Right, nil)
needCopy := false needCopy := false
if !n.IndexMapLValue() && instrumenting { if !n.IndexMapLValue() {
// Enforce that any []byte slices we are not copying
// can not be changed before the map index by forcing
// the map index to happen immediately following the
// conversions. See copyExpr a few lines below.
needCopy = mapKeyReplaceStrConv(n.Right)
if instrumenting {
// Race detector needs the copy so it can // Race detector needs the copy so it can
// call treecopy on the result. // call treecopy on the result.
needCopy = true needCopy = true
} }
// For x = m[string(k)] where k is []byte, the allocation of
// backing bytes for the string can be avoided by reusing
// the []byte backing array. This is a special case that it
// would be nice to handle more generally, but because
// there are no []byte-keyed maps, this specific case comes
// up in important cases in practice. See issue 3512.
// Nothing can change the []byte we are not copying before
// the map index, because the map access is going to
// be forced to happen immediately following this
// conversion (by the ordercopyexpr a few lines below).
if !n.IndexMapLValue() && n.Right.Op == OARRAYBYTESTR {
n.Right.Op = OARRAYBYTESTRTMP
needCopy = true
} }
n.Right = o.mapKeyTemp(n.Left.Type, n.Right) n.Right = o.mapKeyTemp(n.Left.Type, n.Right)
@ -1056,12 +1101,17 @@ func (o *Order) expr(n, lhs *Node) *Node {
n = o.copyExpr(n, n.Type, false) n = o.copyExpr(n, n.Type, false)
} }
// concrete type (not interface) argument must be addressable // concrete type (not interface) argument might need an addressable
// temporary to pass to runtime. // temporary to pass to the runtime conversion routine.
case OCONVIFACE: case OCONVIFACE:
n.Left = o.expr(n.Left, nil) n.Left = o.expr(n.Left, nil)
if n.Left.Type.IsInterface() {
if !n.Left.Type.IsInterface() { break
}
if _, needsaddr := convFuncName(n.Left.Type, n.Type); needsaddr || consttype(n.Left) > 0 {
// Need a temp if we need to pass the address to the conversion function.
// We also process constants here, making a named static global whose
// address we can put directly in an interface (see OCONVIFACE case in walk).
n.Left = o.addrTemp(n.Left) n.Left = o.addrTemp(n.Left)
} }
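A hedged example of the constant case mentioned in the comment above: the constant's value can live in a named static, so its address goes into the interface word directly and no conversion call or allocation happens at run time.

	func answer() interface{} {
		return 12345 // constant operand: backed by a static, per the OCONVIFACE case in walk
	}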
@ -1147,16 +1197,23 @@ func (o *Order) expr(n, lhs *Node) *Node {
case OCLOSURE: case OCLOSURE:
if n.Noescape() && n.Func.Closure.Func.Cvars.Len() > 0 { if n.Noescape() && n.Func.Closure.Func.Cvars.Len() > 0 {
prealloc[n] = o.newTemp(types.Types[TUINT8], false) // walk will fill in correct type prealloc[n] = o.newTemp(closureType(n), false)
} }
case OARRAYLIT, OSLICELIT, OCALLPART: case OSLICELIT, OCALLPART:
n.Left = o.expr(n.Left, nil) n.Left = o.expr(n.Left, nil)
n.Right = o.expr(n.Right, nil) n.Right = o.expr(n.Right, nil)
o.exprList(n.List) o.exprList(n.List)
o.exprList(n.Rlist) o.exprList(n.Rlist)
if n.Noescape() { if n.Noescape() {
prealloc[n] = o.newTemp(types.Types[TUINT8], false) // walk will fill in correct type var t *types.Type
switch n.Op {
case OSLICELIT:
t = types.NewArray(n.Type.Elem(), n.Right.Int64())
case OCALLPART:
t = partialCallType(n)
}
prealloc[n] = o.newTemp(t, false)
} }
case ODDDARG: case ODDDARG:
@ -1181,11 +1238,24 @@ func (o *Order) expr(n, lhs *Node) *Node {
n.Left = o.expr(n.Left, nil) n.Left = o.expr(n.Left, nil)
n = o.copyExpr(n, n.Type, true) n = o.copyExpr(n, n.Type, true)
case OEQ, ONE: case OEQ, ONE, OLT, OLE, OGT, OGE:
n.Left = o.expr(n.Left, nil) n.Left = o.expr(n.Left, nil)
n.Right = o.expr(n.Right, nil) n.Right = o.expr(n.Right, nil)
t := n.Left.Type t := n.Left.Type
if t.IsStruct() || t.IsArray() { switch {
case t.IsString():
// Mark string(byteSlice) arguments to reuse byteSlice backing
// buffer during conversion. String comparison does not
// retain the strings for later use, so it is safe.
if n.Left.Op == OARRAYBYTESTR {
n.Left.Op = OARRAYBYTESTRTMP
}
if n.Right.Op == OARRAYBYTESTR {
n.Right.Op = OARRAYBYTESTRTMP
}
case t.IsStruct() || t.IsArray():
// for complex comparisons, we need both args to be // for complex comparisons, we need both args to be
// addressable so we can pass them to the runtime. // addressable so we can pass them to the runtime.
n.Left = o.addrTemp(n.Left) n.Left = o.addrTemp(n.Left)
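A hedged example of the string case above: the converted string is only read during the comparison, so the temporary may alias the slice's backing array and the conversion need not allocate.

	func eq(b []byte, s string) bool {
		return string(b) == s // OARRAYBYTESTR rewritten to OARRAYBYTESTRTMP here
	}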
@ -1217,9 +1287,10 @@ func okas(ok, val *Node) *Node {
func (o *Order) as2(n *Node) { func (o *Order) as2(n *Node) {
tmplist := []*Node{} tmplist := []*Node{}
left := []*Node{} left := []*Node{}
for _, l := range n.List.Slice() { for ni, l := range n.List.Slice() {
if !l.isBlank() { if !l.isBlank() {
tmp := o.newTemp(l.Type, types.Haspointers(l.Type)) tmp := o.newTemp(l.Type, types.Haspointers(l.Type))
n.List.SetIndex(ni, tmp)
tmplist = append(tmplist, tmp) tmplist = append(tmplist, tmp)
left = append(left, l) left = append(left, l)
} }
@ -1232,14 +1303,6 @@ func (o *Order) as2(n *Node) {
as.Rlist.Set(tmplist) as.Rlist.Set(tmplist)
as = typecheck(as, Etop) as = typecheck(as, Etop)
o.stmt(as) o.stmt(as)
ti := 0
for ni, l := range n.List.Slice() {
if !l.isBlank() {
n.List.SetIndex(ni, tmplist[ti])
ti++
}
}
} }
// okAs2 orders OAS2 with ok. // okAs2 orders OAS2 with ok.

View file

@ -20,7 +20,7 @@ func typeWithoutPointers() *types.Type {
func typeWithPointers() *types.Type { func typeWithPointers() *types.Type {
t := types.New(TSTRUCT) t := types.New(TSTRUCT)
f := &types.Field{Type: types.New(TPTR64)} f := &types.Field{Type: types.New(TPTR)}
t.SetFields([]*types.Field{f}) t.SetFields([]*types.Field{f})
return t return t
} }

View file

@ -534,7 +534,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
TINT, TUINT, TUINTPTR, TBOOL, TINT, TUINT, TUINTPTR, TBOOL,
TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128: TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
case TPTR32, TPTR64, TUNSAFEPTR, TFUNC, TCHAN, TMAP: case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
if off&int64(Widthptr-1) != 0 { if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t) Fatalf("onebitwalktype1: invalid alignment, %v", t)
} }
@ -1163,8 +1163,7 @@ func clobberWalk(b *ssa.Block, v *Node, offset int64, t *types.Type) {
return return
} }
switch t.Etype { switch t.Etype {
case TPTR32, case TPTR,
TPTR64,
TUNSAFEPTR, TUNSAFEPTR,
TFUNC, TFUNC,
TCHAN, TCHAN,

View file

@ -32,7 +32,15 @@ import (
// Do not instrument the following packages at all, // Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion. // at best instrumentation would cause infinite recursion.
var omit_pkgs = []string{"runtime/internal/atomic", "runtime/internal/sys", "runtime", "runtime/race", "runtime/msan", "internal/cpu"} var omit_pkgs = []string{
"runtime/internal/atomic",
"runtime/internal/sys",
"runtime/internal/math",
"runtime",
"runtime/race",
"runtime/msan",
"internal/cpu",
}
// Only insert racefuncenterfp/racefuncexit into the following packages. // Only insert racefuncenterfp/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives. // Memory accesses in the packages are either uninteresting or will cause false positives.

View file

@ -286,13 +286,7 @@ func walkrange(n *Node) *Node {
// This runs *after* the condition check, so we know // This runs *after* the condition check, so we know
// advancing the pointer is safe and won't go past the // advancing the pointer is safe and won't go past the
// end of the allocation. // end of the allocation.
tmp = nod(OADD, hp, nodintconst(t.Elem().Width)) a = nod(OAS, hp, addptr(hp, t.Elem().Width))
tmp.Type = hp.Type
tmp.SetTypecheck(1)
tmp.Right.Type = types.Types[types.Tptr]
tmp.Right.SetTypecheck(1)
a = nod(OAS, hp, tmp)
a = typecheck(a, Etop) a = typecheck(a, Etop)
n.List.Set1(a) n.List.Set1(a)
@ -613,3 +607,18 @@ func arrayClear(n, v1, v2, a *Node) bool {
n = walkstmt(n) n = walkstmt(n)
return true return true
} }
// addptr returns (*T)(uintptr(p) + n).
func addptr(p *Node, n int64) *Node {
t := p.Type
p = nod(OCONVNOP, p, nil)
p.Type = types.Types[TUINTPTR]
p = nod(OADD, p, nodintconst(n))
p = nod(OCONVNOP, p, nil)
p.Type = t
return p
}
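The node tree addptr builds corresponds, roughly, to this source-level expression (a sketch; the real nodes use OCONVNOP directly rather than unsafe.Pointer hops):

	p = (*T)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))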

View file

@ -320,7 +320,12 @@ func hiter(t *types.Type) *types.Type {
// f is method type, with receiver. // f is method type, with receiver.
// return function type, receiver as first argument (or not). // return function type, receiver as first argument (or not).
func methodfunc(f *types.Type, receiver *types.Type) *types.Type { func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
var in []*Node inLen := f.Params().Fields().Len()
if receiver != nil {
inLen++
}
in := make([]*Node, 0, inLen)
if receiver != nil { if receiver != nil {
d := anonfield(receiver) d := anonfield(receiver)
in = append(in, d) in = append(in, d)
@ -332,7 +337,8 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
in = append(in, d) in = append(in, d)
} }
var out []*Node outLen := f.Results().Fields().Len()
out := make([]*Node, 0, outLen)
for _, t := range f.Results().Fields().Slice() { for _, t := range f.Results().Fields().Slice() {
d := anonfield(t.Type) d := anonfield(t.Type)
out = append(out, d) out = append(out, d)
@ -405,19 +411,15 @@ func methods(t *types.Type) []*Sig {
if !sig.isym.Siggen() { if !sig.isym.Siggen() {
sig.isym.SetSiggen(true) sig.isym.SetSiggen(true)
if !eqtype(this, it) { if !types.Identical(this, it) {
compiling_wrappers = true
genwrapper(it, f, sig.isym) genwrapper(it, f, sig.isym)
compiling_wrappers = false
} }
} }
if !sig.tsym.Siggen() { if !sig.tsym.Siggen() {
sig.tsym.SetSiggen(true) sig.tsym.SetSiggen(true)
if !eqtype(this, t) { if !types.Identical(this, t) {
compiling_wrappers = true
genwrapper(t, f, sig.tsym) genwrapper(t, f, sig.tsym)
compiling_wrappers = false
} }
} }
} }
@ -656,7 +658,7 @@ func typePkg(t *types.Type) *types.Pkg {
tsym := t.Sym tsym := t.Sym
if tsym == nil { if tsym == nil {
switch t.Etype { switch t.Etype {
case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN: case TARRAY, TSLICE, TPTR, TCHAN:
if t.Elem() != nil { if t.Elem() != nil {
tsym = t.Elem().Sym tsym = t.Elem().Sym
} }
@ -714,8 +716,7 @@ var kinds = []int{
TFLOAT64: objabi.KindFloat64, TFLOAT64: objabi.KindFloat64,
TBOOL: objabi.KindBool, TBOOL: objabi.KindBool,
TSTRING: objabi.KindString, TSTRING: objabi.KindString,
TPTR32: objabi.KindPtr, TPTR: objabi.KindPtr,
TPTR64: objabi.KindPtr,
TSTRUCT: objabi.KindStruct, TSTRUCT: objabi.KindStruct,
TINTER: objabi.KindInterface, TINTER: objabi.KindInterface,
TCHAN: objabi.KindChan, TCHAN: objabi.KindChan,
@ -736,8 +737,7 @@ func typeptrdata(t *types.Type) int64 {
} }
switch t.Etype { switch t.Etype {
case TPTR32, case TPTR,
TPTR64,
TUNSAFEPTR, TUNSAFEPTR,
TFUNC, TFUNC,
TCHAN, TCHAN,
@ -1035,8 +1035,7 @@ func isreflexive(t *types.Type) bool {
TINT64, TINT64,
TUINT64, TUINT64,
TUINTPTR, TUINTPTR,
TPTR32, TPTR,
TPTR64,
TUNSAFEPTR, TUNSAFEPTR,
TSTRING, TSTRING,
TCHAN: TCHAN:
@ -1071,7 +1070,7 @@ func isreflexive(t *types.Type) bool {
func needkeyupdate(t *types.Type) bool { func needkeyupdate(t *types.Type) bool {
switch t.Etype { switch t.Etype {
case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
TINT64, TUINT64, TUINTPTR, TPTR32, TPTR64, TUNSAFEPTR, TCHAN: TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN:
return false return false
case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
@ -1279,7 +1278,7 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key())))) ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
ot = dextratype(lsym, ot, t, 0) ot = dextratype(lsym, ot, t, 0)
case TPTR32, TPTR64: case TPTR:
if t.Elem().Etype == TANY { if t.Elem().Etype == TANY {
// ../../../../runtime/type.go:/UnsafePointerType // ../../../../runtime/type.go:/UnsafePointerType
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
@ -1383,7 +1382,7 @@ func dtypesym(t *types.Type) *obj.LSym {
// functions must return the existing type structure rather // functions must return the existing type structure rather
// than creating a new one. // than creating a new one.
switch t.Etype { switch t.Etype {
case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT: case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
keep = true keep = true
} }
} }

View file

@ -288,7 +288,7 @@ func staticcopy(l *Node, r *Node, out *[]*Node) bool {
orig := r orig := r
r = r.Name.Defn.Right r = r.Name.Defn.Right
for r.Op == OCONVNOP && !eqtype(r.Type, l.Type) { for r.Op == OCONVNOP && !types.Identical(r.Type, l.Type) {
r = r.Left r = r.Left
} }
@ -751,7 +751,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
case initKindStatic: case initKindStatic:
genAsStatic(a) genAsStatic(a)
case initKindDynamic, initKindLocalCode: case initKindDynamic, initKindLocalCode:
a = orderStmtInPlace(a) a = orderStmtInPlace(a, map[string][]*Node{})
a = walkstmt(a) a = walkstmt(a)
init.Append(a) init.Append(a)
default: default:
@ -833,7 +833,9 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
var a *Node var a *Node
if x := prealloc[n]; x != nil { if x := prealloc[n]; x != nil {
// temp allocated during order.go for dddarg // temp allocated during order.go for dddarg
x.Type = t if !types.Identical(t, x.Type) {
panic("dotdotdot base type does not match order's assigned type")
}
if vstat == nil { if vstat == nil {
a = nod(OAS, x, nil) a = nod(OAS, x, nil)
@ -909,7 +911,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
a = nod(OAS, a, value) a = nod(OAS, a, value)
a = typecheck(a, Etop) a = typecheck(a, Etop)
a = orderStmtInPlace(a) a = orderStmtInPlace(a, map[string][]*Node{})
a = walkstmt(a) a = walkstmt(a)
init.Append(a) init.Append(a)
} }
@ -918,7 +920,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
a = nod(OAS, var_, nod(OSLICE, vauto, nil)) a = nod(OAS, var_, nod(OSLICE, vauto, nil))
a = typecheck(a, Etop) a = typecheck(a, Etop)
a = orderStmtInPlace(a) a = orderStmtInPlace(a, map[string][]*Node{})
a = walkstmt(a) a = walkstmt(a)
init.Append(a) init.Append(a)
} }
@ -1152,7 +1154,7 @@ func oaslit(n *Node, init *Nodes) bool {
// not a special composite literal assignment // not a special composite literal assignment
return false return false
} }
if !eqtype(n.Left.Type, n.Right.Type) { if !types.Identical(n.Left.Type, n.Right.Type) {
// not a special composite literal assignment // not a special composite literal assignment
return false return false
} }

View file

@ -147,6 +147,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.f.Cache.Reset() s.f.Cache.Reset()
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name) s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
s.f.Name = name s.f.Name = name
s.f.PrintOrHtmlSSA = printssa
if fn.Func.Pragma&Nosplit != 0 { if fn.Func.Pragma&Nosplit != 0 {
s.f.NoSplit = true s.f.NoSplit = true
} }
@ -201,7 +202,9 @@ func buildssa(fn *Node, worker int) *ssa.Func {
// Populate SSAable arguments. // Populate SSAable arguments.
for _, n := range fn.Func.Dcl { for _, n := range fn.Func.Dcl {
if n.Class() == PPARAM && s.canSSA(n) { if n.Class() == PPARAM && s.canSSA(n) {
s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n) v := s.newValue0A(ssa.OpArg, n.Type, n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
} }
} }
@ -1247,10 +1250,8 @@ var opToSSA = map[opAndType]ssa.Op{
opAndType{OADD, TUINT16}: ssa.OpAdd16, opAndType{OADD, TUINT16}: ssa.OpAdd16,
opAndType{OADD, TINT32}: ssa.OpAdd32, opAndType{OADD, TINT32}: ssa.OpAdd32,
opAndType{OADD, TUINT32}: ssa.OpAdd32, opAndType{OADD, TUINT32}: ssa.OpAdd32,
opAndType{OADD, TPTR32}: ssa.OpAdd32,
opAndType{OADD, TINT64}: ssa.OpAdd64, opAndType{OADD, TINT64}: ssa.OpAdd64,
opAndType{OADD, TUINT64}: ssa.OpAdd64, opAndType{OADD, TUINT64}: ssa.OpAdd64,
opAndType{OADD, TPTR64}: ssa.OpAdd64,
opAndType{OADD, TFLOAT32}: ssa.OpAdd32F, opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
opAndType{OADD, TFLOAT64}: ssa.OpAdd64F, opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
@ -1365,8 +1366,7 @@ var opToSSA = map[opAndType]ssa.Op{
opAndType{OEQ, TFUNC}: ssa.OpEqPtr, opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
opAndType{OEQ, TMAP}: ssa.OpEqPtr, opAndType{OEQ, TMAP}: ssa.OpEqPtr,
opAndType{OEQ, TCHAN}: ssa.OpEqPtr, opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
opAndType{OEQ, TPTR32}: ssa.OpEqPtr, opAndType{OEQ, TPTR}: ssa.OpEqPtr,
opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr, opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr, opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
opAndType{OEQ, TFLOAT64}: ssa.OpEq64F, opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
@ -1386,8 +1386,7 @@ var opToSSA = map[opAndType]ssa.Op{
opAndType{ONE, TFUNC}: ssa.OpNeqPtr, opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
opAndType{ONE, TMAP}: ssa.OpNeqPtr, opAndType{ONE, TMAP}: ssa.OpNeqPtr,
opAndType{ONE, TCHAN}: ssa.OpNeqPtr, opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
opAndType{ONE, TPTR32}: ssa.OpNeqPtr, opAndType{ONE, TPTR}: ssa.OpNeqPtr,
opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr, opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr, opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
opAndType{ONE, TFLOAT64}: ssa.OpNeq64F, opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
@ -2335,7 +2334,7 @@ func (s *state) expr(n *Node) *ssa.Value {
if max != nil { if max != nil {
k = s.extendIndex(s.expr(max), panicslice) k = s.extendIndex(s.expr(max), panicslice)
} }
p, l, c := s.slice(n.Left.Type, v, i, j, k) p, l, c := s.slice(n.Left.Type, v, i, j, k, n.Bounded())
return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
case OSLICESTR: case OSLICESTR:
@ -2348,7 +2347,7 @@ func (s *state) expr(n *Node) *ssa.Value {
if high != nil { if high != nil {
j = s.extendIndex(s.expr(high), panicslice) j = s.extendIndex(s.expr(high), panicslice)
} }
p, l, _ := s.slice(n.Left.Type, v, i, j, nil) p, l, _ := s.slice(n.Left.Type, v, i, j, nil, n.Bounded())
return s.newValue2(ssa.OpStringMake, n.Type, p, l) return s.newValue2(ssa.OpStringMake, n.Type, p, l)
case OCALLFUNC: case OCALLFUNC:
@ -2863,6 +2862,7 @@ func init() {
var all []*sys.Arch var all []*sys.Arch
var p4 []*sys.Arch var p4 []*sys.Arch
var p8 []*sys.Arch var p8 []*sys.Arch
var lwatomics []*sys.Arch
for _, a := range sys.Archs { for _, a := range sys.Archs {
all = append(all, a) all = append(all, a)
if a.PtrSize == 4 { if a.PtrSize == 4 {
@ -2870,6 +2870,9 @@ func init() {
} else { } else {
p8 = append(p8, a) p8 = append(p8, a)
} }
if a.Family != sys.PPC64 {
lwatomics = append(lwatomics, a)
}
} }
// add adds the intrinsic b for pkg.fn for the given list of architectures. // add adds the intrinsic b for pkg.fn for the given list of architectures.
@ -2916,6 +2919,14 @@ func init() {
}, },
all...) all...)
} }
addF("runtime/internal/math", "MulUintptr",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
}
return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386)
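For context, a sketch of the Go function this intrinsic replaces on other platforms, assumed to match runtime/internal/math of this era (sys is runtime/internal/sys; MaxUintptr is ^uintptr(0)):

	// MulUintptr returns a * b and whether the multiplication overflowed.
	func MulUintptr(a, b uintptr) (uintptr, bool) {
		if a|b < 1<<(4*sys.PtrSize) || a == 0 {
			return a * b, false // both operands fit in half a word: no overflow possible
		}
		overflow := b > MaxUintptr/a
		return a * b, overflow
	}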
add("runtime", "KeepAlive", add("runtime", "KeepAlive",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
@ -2978,6 +2989,13 @@ func init() {
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
}, },
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "LoadAcq",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp", addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
@ -3004,6 +3022,12 @@ func init() {
return nil return nil
}, },
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64) sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64)
addF("runtime/internal/atomic", "StoreRel",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg", addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@ -3091,6 +3115,13 @@ func init() {
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
}, },
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "CasRel",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "And8", addF("runtime/internal/atomic", "And8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@ -3111,8 +3142,10 @@ func init() {
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
@ -3121,6 +3154,7 @@ func init() {
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...) alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...) alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
@ -3240,7 +3274,7 @@ func init() {
y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c) y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[TINT], y) return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
}, },
sys.ARM64, sys.S390X) sys.ARM64, sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8", addF("math/bits", "TrailingZeros8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0]) x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
@ -3431,12 +3465,12 @@ func init() {
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0]) return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0])
}, },
sys.ARM64, sys.S390X) sys.ARM64, sys.S390X, sys.PPC64)
addF("math/bits", "OnesCount8", addF("math/bits", "OnesCount8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0]) return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0])
}, },
sys.S390X) sys.S390X, sys.PPC64)
addF("math/bits", "OnesCount", addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64) sys.AMD64)
@ -3492,7 +3526,7 @@ func init() {
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
}, },
sys.ArchAMD64, sys.ArchARM64) sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64)
add("math/big", "divWW", add("math/big", "divWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value { func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
@ -3551,59 +3585,34 @@ func (s *state) intrinsicCall(n *Node) *ssa.Value {
return v return v
} }
type callArg struct {
offset int64
v *ssa.Value
}
type byOffset []callArg
func (x byOffset) Len() int { return len(x) }
func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byOffset) Less(i, j int) bool {
return x[i].offset < x[j].offset
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *Node) []*ssa.Value { func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
// This code is complicated because of how walk transforms calls. For a call node, // Construct map of temps; see comments in s.call about the structure of n.
// each entry in n.List is either an assignment to OINDREGSP which actually
// stores an arg, or an assignment to a temporary which computes an arg
// which is later assigned.
// The args can also be out of order.
// TODO: when walk goes away someday, this code can go away also.
var args []callArg
temps := map[*Node]*ssa.Value{} temps := map[*Node]*ssa.Value{}
for _, a := range n.List.Slice() { for _, a := range n.List.Slice() {
if a.Op != OAS { if a.Op != OAS {
s.Fatalf("non-assignment as a function argument %v", a.Op) s.Fatalf("non-assignment as a temp function argument %v", a.Op)
} }
l, r := a.Left, a.Right l, r := a.Left, a.Right
switch l.Op { if l.Op != ONAME {
case ONAME: s.Fatalf("non-ONAME temp function argument %v", a.Op)
}
// Evaluate and store to "temporary". // Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n. // Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r) temps[l] = s.expr(r)
case OINDREGSP: }
args := make([]*ssa.Value, n.Rlist.Len())
for i, n := range n.Rlist.Slice() {
// Store a value to an argument slot. // Store a value to an argument slot.
var v *ssa.Value if x, ok := temps[n]; ok {
if x, ok := temps[r]; ok {
// This is a previously computed temporary. // This is a previously computed temporary.
v = x args[i] = x
} else { continue
}
// This is an explicit value; evaluate it. // This is an explicit value; evaluate it.
v = s.expr(r) args[i] = s.expr(n)
} }
args = append(args, callArg{l.Xoffset, v}) return args
default:
s.Fatalf("function argument assignment target not allowed: %v", l.Op)
}
}
sort.Sort(byOffset(args))
res := make([]*ssa.Value, len(args))
for i, a := range args {
res[i] = a.v
}
return res
} }
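A hedged sketch of the call-node shape this code now assumes after walk: n.List carries OAS statements that compute temporaries for arguments which themselves require calls, and n.Rlist carries the final argument expressions in parameter order.

	// n.List:  tmp1 := f(x)   (one OAS per temp, each target an ONAME)
	// n.Rlist: tmp1, 7, y     (arguments, already in declaration order)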
// Calls the function n using the specified call type. // Calls the function n using the specified call type.
@ -3644,7 +3653,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
n2.Pos = fn.Pos n2.Pos = fn.Pos
n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it. n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
closure = s.expr(n2) closure = s.expr(n2)
// Note: receiver is already assigned in n.List, so we don't // Note: receiver is already present in n.Rlist, so we don't
// want to set it here. // want to set it here.
case OCALLINTER: case OCALLINTER:
if fn.Op != ODOTINTER { if fn.Op != ODOTINTER {
@ -3665,32 +3674,43 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
dowidth(fn.Type) dowidth(fn.Type)
stksize := fn.Type.ArgWidth() // includes receiver stksize := fn.Type.ArgWidth() // includes receiver
// Run all argument assignments. The arg slots have already // Run all assignments of temps.
// been offset by the appropriate amount (+2*widthptr for go/defer, // The temps are introduced to avoid overwriting argument
// +widthptr for interface calls). // slots when arguments themselves require function calls.
// For OCALLMETH, the receiver is set in these statements.
s.stmtList(n.List) s.stmtList(n.List)
// Set receiver (for interface calls) // Store arguments to stack, including defer/go arguments and receiver for method calls.
if rcvr != nil { // These are written in SP-offset order.
argStart := Ctxt.FixedFrameSize() argStart := Ctxt.FixedFrameSize()
if k != callNormal { // Defer/go args.
argStart += int64(2 * Widthptr)
}
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
s.store(types.Types[TUINTPTR], addr, rcvr)
}
// Defer/go args
if k != callNormal { if k != callNormal {
// Write argsize and closure (args to newproc/deferproc). // Write argsize and closure (args to newproc/deferproc).
argStart := Ctxt.FixedFrameSize()
argsize := s.constInt32(types.Types[TUINT32], int32(stksize)) argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
s.store(types.Types[TUINT32], addr, argsize) s.store(types.Types[TUINT32], addr, argsize)
addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
s.store(types.Types[TUINTPTR], addr, closure) s.store(types.Types[TUINTPTR], addr, closure)
stksize += 2 * int64(Widthptr) stksize += 2 * int64(Widthptr)
argStart += 2 * int64(Widthptr)
}
// Set receiver (for interface calls).
if rcvr != nil {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
s.store(types.Types[TUINTPTR], addr, rcvr)
}
// Write args.
t := n.Left.Type
args := n.Rlist.Slice()
if n.Op == OCALLMETH {
f := t.Recv()
s.storeArg(args[0], f.Type, argStart+f.Offset)
args = args[1:]
}
for i, n := range args {
f := t.Params().Field(i)
s.storeArg(n, f.Type, argStart+f.Offset)
} }
// call target // call target
@ -4175,10 +4195,24 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
} }
} }
func (s *state) storeArg(n *Node, t *types.Type, off int64) {
pt := types.NewPtr(t)
sp := s.constOffPtrSP(pt, off)
if !canSSAType(t) {
a := s.addr(n, false)
s.move(t, sp, a)
return
}
a := s.expr(n)
s.storeType(t, sp, a, 0, false)
}
 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
 // i,j,k may be nil, in which case they are set to their default value.
 // t is a slice, ptr to array, or string type.
-func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
+func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
 	var elemtype *types.Type
 	var ptrtype *types.Type
 	var ptr *ssa.Value
@@ -4223,6 +4257,7 @@ func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value)
 		k = cap
 	}
+	if !bounded {
 	// Panic if slice indices are not in bounds.
 	s.sliceBoundsCheck(i, j)
 	if j != k {
@@ -4231,6 +4266,7 @@ func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value)
 	if k != cap {
 		s.sliceBoundsCheck(k, cap)
 	}
+	}
 	// Generate the following code assuming that indexes are in bounds.
 	// The masking is to make sure that we don't generate a slice
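The new bounded parameter lets callers that have already established the index invariant skip the panic branches entirely. The checks elided when bounded is true are, as a Go-level sketch of the code above (not the emitted SSA):

    // for v[i:j:k], skipped when bounded:
    //   sliceBoundsCheck(i, j)              // panic unless i <= j
    //   if j != k   { sliceBoundsCheck(j, k) }
    //   if k != cap { sliceBoundsCheck(k, cap) }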
@@ -5004,9 +5040,7 @@ func genssa(f *ssa.Func, pp *Progs) {
 	var progToValue map[*obj.Prog]*ssa.Value
 	var progToBlock map[*obj.Prog]*ssa.Block
 	var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
-	var logProgs = e.log
-	if f.HTMLWriter != nil {
-		// logProgs can be false, meaning that we do not dump to the Stdout.
+	if f.PrintOrHtmlSSA {
 		progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
 		progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
 		f.Logf("genssa %s\n", f.Name)
@@ -5089,7 +5123,7 @@ func genssa(f *ssa.Func, pp *Progs) {
 				valueToProgAfter[v.ID] = s.pp.next
 			}
-			if logProgs {
+			if f.PrintOrHtmlSSA {
 				for ; x != s.pp.next; x = x.Link {
 					progToValue[x] = v
 				}
@@ -5107,7 +5141,7 @@ func genssa(f *ssa.Func, pp *Progs) {
 		x := s.pp.next
 		s.SetPos(b.Pos)
 		thearch.SSAGenBlock(&s, b, next)
-		if logProgs {
+		if f.PrintOrHtmlSSA {
 			for ; x != s.pp.next; x = x.Link {
 				progToBlock[x] = b
 			}
@@ -5140,7 +5174,7 @@ func genssa(f *ssa.Func, pp *Progs) {
 		}
 	}
-	if logProgs {
+	if e.log { // spew to stdout
 		filename := ""
 		for p := pp.Text; p != nil; p = p.Link {
 			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
@@ -5159,7 +5193,7 @@ func genssa(f *ssa.Func, pp *Progs) {
 			f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
 		}
 	}
-	if f.HTMLWriter != nil {
+	if f.HTMLWriter != nil { // spew to ssa.html
 		var buf bytes.Buffer
 		buf.WriteString("<code>")
 		buf.WriteString("<dl class=\"ssa-gen\">")
@@ -529,119 +529,6 @@ func methtype(t *types.Type) *types.Type {
 	return nil
 }
-
-// eqtype reports whether t1 and t2 are identical, following the spec rules.
-//
-// Any cyclic type must go through a named type, and if one is
-// named, it is only identical to the other if they are the same
-// pointer (t1 == t2), so there's no chance of chasing cycles
-// ad infinitum, so no need for a depth counter.
-func eqtype(t1, t2 *types.Type) bool {
-	return eqtype1(t1, t2, true, nil)
-}
-
-// eqtypeIgnoreTags is like eqtype but it ignores struct tags for struct identity.
-func eqtypeIgnoreTags(t1, t2 *types.Type) bool {
-	return eqtype1(t1, t2, false, nil)
-}
-
-type typePair struct {
-	t1 *types.Type
-	t2 *types.Type
-}
-
-func eqtype1(t1, t2 *types.Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool {
-	if t1 == t2 {
-		return true
-	}
-	if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() {
-		return false
-	}
-	if t1.Sym != nil || t2.Sym != nil {
-		// Special case: we keep byte/uint8 and rune/int32
-		// separate for error messages. Treat them as equal.
-		switch t1.Etype {
-		case TUINT8:
-			return (t1 == types.Types[TUINT8] || t1 == types.Bytetype) && (t2 == types.Types[TUINT8] || t2 == types.Bytetype)
-		case TINT32:
-			return (t1 == types.Types[TINT32] || t1 == types.Runetype) && (t2 == types.Types[TINT32] || t2 == types.Runetype)
-		default:
-			return false
-		}
-	}
-
-	if assumedEqual == nil {
-		assumedEqual = make(map[typePair]struct{})
-	} else if _, ok := assumedEqual[typePair{t1, t2}]; ok {
-		return true
-	}
-	assumedEqual[typePair{t1, t2}] = struct{}{}
-
-	switch t1.Etype {
-	case TINTER:
-		if t1.NumFields() != t2.NumFields() {
-			return false
-		}
-		for i, f1 := range t1.FieldSlice() {
-			f2 := t2.Field(i)
-			if f1.Sym != f2.Sym || !eqtype1(f1.Type, f2.Type, cmpTags, assumedEqual) {
-				return false
-			}
-		}
-		return true
-
-	case TSTRUCT:
-		if t1.NumFields() != t2.NumFields() {
-			return false
-		}
-		for i, f1 := range t1.FieldSlice() {
-			f2 := t2.Field(i)
-			if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !eqtype1(f1.Type, f2.Type, cmpTags, assumedEqual) {
-				return false
-			}
-			if cmpTags && f1.Note != f2.Note {
-				return false
-			}
-		}
-		return true
-
-	case TFUNC:
-		// Check parameters and result parameters for type equality.
-		// We intentionally ignore receiver parameters for type
-		// equality, because they're never relevant.
-		for _, f := range types.ParamsResults {
-			// Loop over fields in structs, ignoring argument names.
-			fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice()
-			if len(fs1) != len(fs2) {
-				return false
-			}
-			for i, f1 := range fs1 {
-				f2 := fs2[i]
-				if f1.Isddd() != f2.Isddd() || !eqtype1(f1.Type, f2.Type, cmpTags, assumedEqual) {
-					return false
-				}
-			}
-		}
-		return true
-
-	case TARRAY:
-		if t1.NumElem() != t2.NumElem() {
-			return false
-		}
-
-	case TCHAN:
-		if t1.ChanDir() != t2.ChanDir() {
-			return false
-		}
-
-	case TMAP:
-		if !eqtype1(t1.Key(), t2.Key(), cmpTags, assumedEqual) {
-			return false
-		}
-	}
-
-	return eqtype1(t1.Elem(), t2.Elem(), cmpTags, assumedEqual)
-}
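eqtype1 lives on as types.Identical (every call site below is rewritten to it). Its assumedEqual map is the usual coinductive trick for recursive types: record the pair as equal before recursing, so comparison of cyclic types terminates instead of looping. The pattern in isolation, as a self-contained sketch (simplified Type, not the types package API):

    type Type struct {
    	kind int
    	elem *Type
    }

    type typePair struct{ t1, t2 *Type }

    // identical mirrors eqtype1's memoization: optimistically assume
    // in-progress pairs are equal so cyclic types terminate.
    func identical(t1, t2 *Type, assumed map[typePair]struct{}) bool {
    	if t1 == t2 {
    		return true
    	}
    	if t1 == nil || t2 == nil || t1.kind != t2.kind {
    		return false
    	}
    	if _, ok := assumed[typePair{t1, t2}]; ok {
    		return true // already being compared higher up the stack
    	}
    	assumed[typePair{t1, t2}] = struct{}{}
    	return identical(t1.elem, t2.elem, assumed)
    }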
 // Are t1 and t2 equal struct types when field names are ignored?
 // For deciding whether the result struct from g can be copied
 // directly when compiling f(g()).
@@ -655,7 +542,7 @@ func eqtypenoname(t1 *types.Type, t2 *types.Type) bool {
 	}
 	for i, f1 := range t1.FieldSlice() {
 		f2 := t2.Field(i)
-		if !eqtype(f1.Type, f2.Type) {
+		if !types.Identical(f1.Type, f2.Type) {
 			return false
 		}
 	}
@@ -670,13 +557,6 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
 		*why = ""
 	}
-	// TODO(rsc,lvd): This behaves poorly in the presence of inlining.
-	// https://golang.org/issue/2795
-	if safemode && !inimport && src != nil && src.Etype == TUNSAFEPTR {
-		yyerror("cannot use unsafe.Pointer")
-		errorexit()
-	}
-
 	if src == dst {
 		return OCONVNOP
 	}
@@ -685,7 +565,7 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
 	}
 	// 1. src type is identical to dst.
-	if eqtype(src, dst) {
+	if types.Identical(src, dst) {
 		return OCONVNOP
 	}
@@ -696,7 +576,7 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
 	// we want to recompute the itab. Recomputing the itab ensures
 	// that itabs are unique (thus an interface with a compile-time
 	// type I has an itab with interface type I).
-	if eqtype(src.Orig, dst.Orig) {
+	if types.Identical(src.Orig, dst.Orig) {
 		if src.IsEmptyInterface() {
 			// Conversion between two empty interfaces
 			// requires no code.
@@ -764,7 +644,7 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
 	// src and dst have identical element types, and
 	// either src or dst is not a named type.
 	if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
-		if eqtype(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
+		if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
 			return OCONVNOP
 		}
 	}
@@ -772,8 +652,7 @@ func assignop(src *types.Type, dst *types.Type, why *string) Op {
 	// 5. src is the predeclared identifier nil and dst is a nillable type.
 	if src.Etype == TNIL {
 		switch dst.Etype {
-		case TPTR32,
-			TPTR64,
+		case TPTR,
 			TFUNC,
 			TMAP,
 			TCHAN,
@@ -836,14 +715,14 @@ func convertop(src *types.Type, dst *types.Type, why *string) Op {
 	}
 	// 2. Ignoring struct tags, src and dst have identical underlying types.
-	if eqtypeIgnoreTags(src.Orig, dst.Orig) {
+	if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
 		return OCONVNOP
 	}
 	// 3. src and dst are unnamed pointer types and, ignoring struct tags,
 	// their base types have identical underlying types.
 	if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
-		if eqtypeIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
+		if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
 			return OCONVNOP
 		}
 	}
@@ -946,7 +825,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
 		}
 	}
-	if eqtype(n.Type, t) {
+	if types.Identical(n.Type, t) {
 		return n
 	}
@@ -1729,11 +1608,10 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
 	Curfn = fn
 	typecheckslice(fn.Nbody.Slice(), Etop)
-	// TODO(mdempsky): Investigate why this doesn't work with
-	// indexed export. For now, we disable even in non-indexed
-	// mode to ensure fair benchmark comparisons and to track down
-	// unintended compilation differences.
-	if false {
+	// Inline calls within (*T).M wrappers. This is safe because we only
+	// generate those wrappers within the same compilation unit as (T).M.
+	// TODO(mdempsky): Investigate why we can't enable this more generally.
+	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
 		inlcalls(fn)
 	}
 	escAnalyze([]*Node{fn}, false)
@@ -1813,7 +1691,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
 			return false
 		}
 		tm := tms[i]
-		if !eqtype(tm.Type, im.Type) {
+		if !types.Identical(tm.Type, im.Type) {
 			*m = im
 			*samename = tm
 			*ptr = 0
@@ -1845,7 +1723,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
 			return false
 		}
 		tm := tms[i]
-		if tm.Nointerface() || !eqtype(tm.Type, im.Type) {
+		if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
 			*m = im
 			*samename = tm
 			*ptr = 0
@@ -2003,8 +1881,7 @@ func isdirectiface(t *types.Type) bool {
 	}
 	switch t.Etype {
-	case TPTR32,
-		TPTR64,
+	case TPTR,
 		TCHAN,
 		TMAP,
 		TFUNC,
@@ -611,7 +611,7 @@ Outer:
 			continue
 		}
 		for _, n := range prev {
-			if eqtype(n.Left.Type, c.node.Left.Type) {
+			if types.Identical(n.Left.Type, c.node.Left.Type) {
 				yyerrorl(c.node.Pos, "duplicate case %v in type switch\n\tprevious case at %v", c.node.Left.Type, n.Line())
 				// avoid double-reporting errors
 				continue Outer
@@ -45,7 +45,7 @@ type Node struct {
 	// - ONAME nodes that refer to local variables use it to identify their stack frame position.
 	// - ODOT, ODOTPTR, and OINDREGSP use it to indicate offset relative to their base address.
 	// - OSTRUCTKEY uses it to store the named field's offset.
-	// - Named OLITERALs use it to to store their ambient iota value.
+	// - Named OLITERALs use it to store their ambient iota value.
 	// Possibly still more uses. If you find any, document them.
 	Xoffset int64
@@ -65,7 +65,7 @@ func (n *Node) ResetAux() {
 func (n *Node) SubOp() Op {
 	switch n.Op {
-	case OASOP, OCMPIFACE, OCMPSTR, ONAME:
+	case OASOP, ONAME:
 	default:
 		Fatalf("unexpected op: %v", n.Op)
 	}
@@ -74,7 +74,7 @@ func (n *Node) SubOp() Op {
 func (n *Node) SetSubOp(op Op) {
 	switch n.Op {
-	case OASOP, OCMPIFACE, OCMPSTR, ONAME:
+	case OASOP, ONAME:
 	default:
 		Fatalf("unexpected op: %v", n.Op)
 	}
@@ -603,15 +603,21 @@ const (
 	OAS2DOTTYPE // List = Rlist (x, ok = I.(int))
 	OASOP       // Left Etype= Right (x += y)
 	OCALL       // Left(List) (function call, method call or type conversion)
-	OCALLFUNC   // Left(List) (function call f(args))
-	OCALLMETH   // Left(List) (direct method call x.Method(args))
-	OCALLINTER  // Left(List) (interface method call x.Method(args))
+
+	// OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
+	// Prior to walk, they are: Left(List), where List is all regular arguments.
+	// If present, Right is an ODDDARG that holds the
+	// generated slice used in a call to a variadic function.
+	// After walk, List is a series of assignments to temporaries,
+	// and Rlist is an updated set of arguments, including any ODDDARG slice.
+	// TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
+	OCALLFUNC  // Left(List/Rlist) (function call f(args))
+	OCALLMETH  // Left(List/Rlist) (direct method call x.Method(args))
+	OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
 	OCALLPART   // Left.Right (method expression x.Method, not called)
 	OCAP        // cap(Left)
 	OCLOSE      // close(Left)
 	OCLOSURE    // func Type { Body } (func literal)
-	OCMPIFACE   // Left Etype Right (interface comparison, x == y or x != y)
-	OCMPSTR     // Left Etype Right (string comparison, x == y, x < y, etc)
 	OCOMPLIT    // Right{List} (composite literal, not yet lowered to specific form)
 	OMAPLIT     // Type{List} (composite literal, Type is map)
 	OSTRUCTLIT  // Type{List} (composite literal, Type is struct)
@@ -7,6 +7,7 @@
 package main

 import (
+	"runtime"
 	"testing"
 )
@@ -14,6 +15,13 @@ const (
 	y = 0x0fffFFFF
 )

+var (
+	g8  int8
+	g16 int16
+	g32 int32
+	g64 int64
+)
+
 //go:noinline
 func lshNop1(x uint64) uint64 {
 	// two outer shifts should be removed
@@ -915,4 +923,32 @@ func TestArithmetic(t *testing.T) {
 	testLoadSymCombine(t)
 	testShiftRemoval(t)
 	testShiftedOps(t)
+	testDivFixUp(t)
+}
+
+// testDivFixUp ensures that signed division fix-ups are being generated.
+func testDivFixUp(t *testing.T) {
+	defer func() {
+		if r := recover(); r != nil {
+			t.Error("testDivFixUp failed")
+			if e, ok := r.(runtime.Error); ok {
+				t.Logf("%v\n", e.Error())
+			}
+		}
+	}()
+	var w int8 = -128
+	var x int16 = -32768
+	var y int32 = -2147483648
+	var z int64 = -9223372036854775808
+	for i := -5; i < 0; i++ {
+		g8 = w / int8(i)
+		g16 = x / int16(i)
+		g32 = y / int32(i)
+		g64 = z / int64(i)
+		g8 = w % int8(i)
+		g16 = x % int16(i)
+		g32 = y % int32(i)
+		g64 = z % int64(i)
+	}
 }
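The case the fix-up protects is MinIntN / -1: the Go spec defines the quotient as MinIntN (two's-complement wrap) with remainder 0, but the hardware divide instruction on some targets (x86's IDIV, for example) faults on it, so the compiler must branch around the instruction. Expected behavior, per the spec:

    var z int64 = -9223372036854775808 // math.MinInt64
    d := int64(-1)
    _ = z / d // == math.MinInt64: wraps, must not fault
    _ = z % d // == 0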
@@ -87,8 +87,7 @@ var _typekind = []string{
 	TFLOAT64:   "float64",
 	TBOOL:      "bool",
 	TSTRING:    "string",
-	TPTR32:     "pointer",
-	TPTR64:     "pointer",
+	TPTR:       "pointer",
 	TUNSAFEPTR: "unsafe.Pointer",
 	TSTRUCT:    "struct",
 	TINTER:     "interface",
@@ -297,10 +296,11 @@ func indexlit(n *Node) *Node {
 //	n.Left = typecheck1(n.Left, top)
 func typecheck1(n *Node, top int) *Node {
 	switch n.Op {
-	case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, ORETJMP:
-		// n.Sym is a field/method name, not a variable.
-	default:
-		if n.Sym != nil {
+	case OLITERAL, ONAME, ONONAME, OTYPE:
+		if n.Sym == nil {
+			break
+		}
 		if n.Op == ONAME && n.SubOp() != 0 && top&Ecall == 0 {
 			yyerror("use of builtin %v not in function call", n.Sym)
 			n.Type = nil
@@ -313,7 +313,6 @@ func typecheck1(n *Node, top int) *Node {
 			return n
 		}
 	}
-	}

 	ok := 0
 	switch n.Op {
@@ -633,7 +632,7 @@ func typecheck1(n *Node, top int) *Node {
 			et = TINT
 		}
 		aop := OXXX
-		if iscmp[n.Op] && t.Etype != TIDEAL && !eqtype(l.Type, r.Type) {
+		if iscmp[n.Op] && t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
 			// comparison is okay as long as one side is
 			// assignable to the other. convert so they have
 			// the same type.
@@ -688,7 +687,7 @@ func typecheck1(n *Node, top int) *Node {
 			et = t.Etype
 		}
-		if t.Etype != TIDEAL && !eqtype(l.Type, r.Type) {
+		if t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
 			l, r = defaultlit2(l, r, true)
 			if r.Type.IsInterface() == l.Type.IsInterface() || aop == 0 {
 				yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
@@ -748,12 +747,7 @@ func typecheck1(n *Node, top int) *Node {
 			}
 		}
-		if et == TSTRING {
-			if iscmp[n.Op] {
-				ot := n.Op
-				n.Op = OCMPSTR
-				n.SetSubOp(ot)
-			} else if n.Op == OADD {
+		if et == TSTRING && n.Op == OADD {
 			// create OADDSTR node with list of strings in x + y + z + (w + v) + ...
 			n.Op = OADDSTR
@@ -770,22 +764,6 @@ func typecheck1(n *Node, top int) *Node {
 			n.Left = nil
 			n.Right = nil
 		}
-		}
-
-		if et == TINTER {
-			if l.Op == OLITERAL && l.Val().Ctype() == CTNIL {
-				// swap for back end
-				n.Left = r
-				n.Right = l
-			} else if r.Op == OLITERAL && r.Val().Ctype() == CTNIL {
-			} else // leave alone for back end
-			if r.Type.IsInterface() == l.Type.IsInterface() {
-				ot := n.Op
-				n.Op = OCMPIFACE
-				n.SetSubOp(ot)
-			}
-		}

 		if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
 			if r.Val().U.(*Mpint).CmpInt64(0) == 0 {
@@ -1255,7 +1233,7 @@ func typecheck1(n *Node, top int) *Node {
 			// It isn't necessary, so just do a sanity check.
 			tp := t.Recv().Type
-			if l.Left == nil || !eqtype(l.Left.Type, tp) {
+			if l.Left == nil || !types.Identical(l.Left.Type, tp) {
 				Fatalf("method receiver")
 			}
@@ -1474,7 +1452,7 @@ func typecheck1(n *Node, top int) *Node {
 			n.Right = r
 		}
-		if !eqtype(l.Type, r.Type) {
+		if !types.Identical(l.Type, r.Type) {
 			yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
 			n.Type = nil
 			return n
@@ -1679,7 +1657,7 @@ func typecheck1(n *Node, top int) *Node {
 		// copy([]byte, string)
 		if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
-			if eqtype(n.Left.Type.Elem(), types.Bytetype) {
+			if types.Identical(n.Left.Type.Elem(), types.Bytetype) {
 				break
 			}
 			yyerror("arguments to copy have different element types: %L and string", n.Left.Type)
@@ -1699,7 +1677,7 @@ func typecheck1(n *Node, top int) *Node {
 			return n
 		}
-		if !eqtype(n.Left.Type.Elem(), n.Right.Type.Elem()) {
+		if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) {
 			yyerror("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type)
 			n.Type = nil
 			return n
@@ -1741,14 +1719,14 @@ func typecheck1(n *Node, top int) *Node {
 			}
 		}
-	// do not use stringtoarraylit.
+	// do not convert to []byte literal. See CL 125796.
 	// generated code and compiler memory footprint is better without it.
 	case OSTRARRAYBYTE:
 		break
 	case OSTRARRAYRUNE:
 		if n.Left.Op == OLITERAL {
-			n = stringtoarraylit(n)
+			n = stringtoruneslit(n)
 		}
@@ -2134,10 +2112,6 @@ func typecheck1(n *Node, top int) *Node {
 		}
 	}
-	if safemode && !inimport && !compiling_wrappers && t != nil && t.Etype == TUNSAFEPTR {
-		yyerror("cannot use unsafe.Pointer")
-	}
-
 	evconst(n)
 	if n.Op == OTYPE && top&Etype == 0 {
 		if !n.Type.Broke() {
@@ -2505,17 +2479,17 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
 		tt := n.Left.Type
 		dowidth(tt)
 		rcvr := f2.Type.Recv().Type
-		if !eqtype(rcvr, tt) {
-			if rcvr.IsPtr() && eqtype(rcvr.Elem(), tt) {
+		if !types.Identical(rcvr, tt) {
+			if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
 				checklvalue(n.Left, "call pointer method on")
 				n.Left = nod(OADDR, n.Left, nil)
 				n.Left.SetImplicit(true)
 				n.Left = typecheck(n.Left, Etype|Erv)
-			} else if tt.IsPtr() && !rcvr.IsPtr() && eqtype(tt.Elem(), rcvr) {
+			} else if tt.IsPtr() && !rcvr.IsPtr() && types.Identical(tt.Elem(), rcvr) {
 				n.Left = nod(OIND, n.Left, nil)
 				n.Left.SetImplicit(true)
 				n.Left = typecheck(n.Left, Etype|Erv)
-			} else if tt.IsPtr() && tt.Elem().IsPtr() && eqtype(derefall(tt), derefall(rcvr)) {
+			} else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
 				yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
 				for tt.IsPtr() {
 					// Stop one level early for method with pointer receiver.
@@ -2857,7 +2831,7 @@ func keydup(n *Node, hash map[uint32][]*Node) {
 		if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
 			a = a.Left
 		}
-		if !eqtype(a.Type, n.Type) {
+		if !types.Identical(a.Type, n.Type) {
 			continue
 		}
 		cmp.Right = a
@@ -2901,7 +2875,7 @@ func pushtype(n *Node, t *types.Type) {
 		n.Right.SetImplicit(true) // * is okay
 	} else if Debug['s'] != 0 {
 		n.Right = typecheck(n.Right, Etype)
-		if n.Right.Type != nil && eqtype(n.Right.Type, t) {
+		if n.Right.Type != nil && types.Identical(n.Right.Type, t) {
 			fmt.Printf("%v: redundant type: %v\n", n.Line(), t)
 		}
 	}
@@ -3287,7 +3261,7 @@ func checkassignlist(stmt *Node, l Nodes) {
 // lvalue expression is for OSLICE and OAPPEND optimizations, and it
 // is correct in those settings.
 func samesafeexpr(l *Node, r *Node) bool {
-	if l.Op != r.Op || !eqtype(l.Type, r.Type) {
+	if l.Op != r.Op || !types.Identical(l.Type, r.Type) {
 		return false
 	}
@@ -3535,28 +3509,20 @@ func typecheckfunc(n *Node) {
 	}
 }

-// The result of stringtoarraylit MUST be assigned back to n, e.g.
-// 	n.Left = stringtoarraylit(n.Left)
-func stringtoarraylit(n *Node) *Node {
+// The result of stringtoruneslit MUST be assigned back to n, e.g.
+// 	n.Left = stringtoruneslit(n.Left)
+func stringtoruneslit(n *Node) *Node {
 	if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR {
 		Fatalf("stringtoarraylit %v", n)
 	}
-	s := n.Left.Val().U.(string)
 	var l []*Node
-	if n.Type.Elem().Etype == TUINT8 {
-		// []byte
-		for i := 0; i < len(s); i++ {
-			l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(s[i]))))
-		}
-	} else {
-		// []rune
+	s := n.Left.Val().U.(string)
 	i := 0
 	for _, r := range s {
 		l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
 		i++
 	}
-	}

 	nn := nod(OCOMPLIT, nil, typenod(n.Type))
 	nn.List.Set(l)
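Concretely, the rewrite turns a constant []rune conversion into an indexed composite literal; note the key advances by one per rune, not by byte offset:

    // []rune("héj") becomes, roughly,
    // []rune{0: 'h', 1: 'é', 2: 'j'}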
@@ -3688,9 +3654,6 @@ func typecheckdef(n *Node) {
 	default:
 		Fatalf("typecheckdef %v", n.Op)

-	case OGOTO, OLABEL, OPACK:
-		// nothing to do here
-
 	case OLITERAL:
 		if n.Name.Param.Ntype != nil {
 			n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, Etype)
@@ -3731,7 +3694,7 @@ func typecheckdef(n *Node) {
 			goto ret
 		}
-		if !e.Type.IsUntyped() && !eqtype(t, e.Type) {
+		if !e.Type.IsUntyped() && !types.Identical(t, e.Type) {
 			yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t)
 			goto ret
 		}
@@ -32,9 +32,7 @@ const (
 	TBOOL = types.TBOOL

-	TPTR32 = types.TPTR32
-	TPTR64 = types.TPTR64
+	TPTR = types.TPTR

 	TFUNC  = types.TFUNC
 	TSLICE = types.TSLICE
 	TARRAY = types.TARRAY
@@ -177,11 +177,8 @@ func typeinit() {
 		simtype[et] = et
 	}

-	types.Types[TPTR32] = types.New(TPTR32)
-	dowidth(types.Types[TPTR32])
-
-	types.Types[TPTR64] = types.New(TPTR64)
-	dowidth(types.Types[TPTR64])
+	types.Types[TPTR] = types.New(TPTR)
+	dowidth(types.Types[TPTR])

 	t := types.New(TUNSAFEPTR)
 	types.Types[TUNSAFEPTR] = t
@@ -190,11 +187,6 @@ func typeinit() {
 	asNode(t.Sym.Def).Name = new(Name)
 	dowidth(types.Types[TUNSAFEPTR])

-	types.Tptr = TPTR32
-	if Widthptr == 8 {
-		types.Tptr = TPTR64
-	}
-
 	for et := TINT8; et <= TUINT64; et++ {
 		isInt[et] = true
 	}
@@ -263,8 +255,7 @@ func typeinit() {
 	okforlen[TSLICE] = true
 	okforlen[TSTRING] = true

-	okforeq[TPTR32] = true
-	okforeq[TPTR64] = true
+	okforeq[TPTR] = true
 	okforeq[TUNSAFEPTR] = true
 	okforeq[TINTER] = true
 	okforeq[TCHAN] = true
@@ -357,10 +348,10 @@ func typeinit() {
 	types.Types[TINTER] = types.New(TINTER)

 	// simple aliases
-	simtype[TMAP] = types.Tptr
-	simtype[TCHAN] = types.Tptr
-	simtype[TFUNC] = types.Tptr
-	simtype[TUNSAFEPTR] = types.Tptr
+	simtype[TMAP] = TPTR
+	simtype[TCHAN] = TPTR
+	simtype[TFUNC] = TPTR
+	simtype[TUNSAFEPTR] = TPTR

 	array_array = int(Rnd(0, int64(Widthptr)))
 	array_nel = int(Rnd(int64(array_array)+int64(Widthptr), int64(Widthptr)))
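With a single TPTR kind, pointer width is no longer encoded in the EType and the types.Tptr indirection disappears; only the width computation consults the target. For contrast:

    // before: types.Tptr = TPTR32; if Widthptr == 8 { types.Tptr = TPTR64 }
    // after:  dowidth(types.Types[TPTR]) sizes the one kind from the
    //         target's Widthptr (conceptually; dowidth owns the details)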
File diff suppressed because it is too large
@@ -313,9 +313,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		}
 		arg0 := v.Args[0].Reg()
 		out := v.Reg0()
-		// SYNC
+		// SYNC when AuxInt == 1; otherwise, load-acquire
+		if v.AuxInt == 1 {
 			psync := s.Prog(ppc64.ASYNC)
 			psync.To.Type = obj.TYPE_NONE
+		}
 		// Load
 		p := s.Prog(ld)
 		p.From.Type = obj.TYPE_MEM
@@ -338,7 +340,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 	case ssa.OpPPC64LoweredAtomicStore32,
 		ssa.OpPPC64LoweredAtomicStore64:
-		// SYNC
+		// SYNC or LWSYNC
 		// MOVD/MOVW arg1,(arg0)
 		st := ppc64.AMOVD
 		if v.Op == ssa.OpPPC64LoweredAtomicStore32 {
@@ -346,8 +348,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		}
 		arg0 := v.Args[0].Reg()
 		arg1 := v.Args[1].Reg()
+		// If AuxInt == 0, LWSYNC (Store-Release), else SYNC
 		// SYNC
-		psync := s.Prog(ppc64.ASYNC)
+		syncOp := ppc64.ASYNC
+		if v.AuxInt == 0 {
+			syncOp = ppc64.ALWSYNC
+		}
+		psync := s.Prog(syncOp)
 		psync.To.Type = obj.TYPE_NONE
 		// Store
 		p := s.Prog(st)
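The net effect is two store sequences keyed off AuxInt, as the comments state:

    // AuxInt == 1 (sequentially consistent): SYNC;   MOVW/MOVD Rval, (Rptr)
    // AuxInt == 0 (store-release):           LWSYNC; MOVW/MOVD Rval, (Rptr)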
@@ -360,12 +367,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		ssa.OpPPC64LoweredAtomicCas32:
 		// LWSYNC
 		// loop:
-		// LDAR        (Rarg0), Rtmp
+		// LDAR        (Rarg0), MutexHint, Rtmp
 		// CMP         Rarg1, Rtmp
 		// BNE         fail
 		// STDCCC      Rarg2, (Rarg0)
 		// BNE         loop
-		// LWSYNC
+		// LWSYNC      // Only for sequential consistency; not required in CasRel.
 		// MOVD        $1, Rout
 		// BR          end
 		// fail:
@@ -393,6 +400,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.From.Reg = r0
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = ppc64.REGTMP
+		// If it is a Compare-and-Swap-Release operation, set the EH field with
+		// the release hint.
+		if v.AuxInt == 0 {
+			p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 0})
+		}
 		// CMP reg1,reg2
 		p1 := s.Prog(cmp)
 		p1.From.Type = obj.TYPE_REG
@@ -414,8 +426,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		gc.Patch(p4, p)
 		// LWSYNC - Assuming shared data not write-through-required nor
 		// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
+		// If the operation is a CAS-Release, then synchronization is not necessary.
+		if v.AuxInt != 0 {
 			plwsync2 := s.Prog(ppc64.ALWSYNC)
 			plwsync2.To.Type = obj.TYPE_NONE
+		}
 		// return true
 		p5 := s.Prog(ppc64.AMOVD)
 		p5.From.Type = obj.TYPE_CONST
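So a release CAS (AuxInt == 0) differs from the sequentially consistent one in exactly two places: the load-reserve gets its EH hint set to 0, and the trailing LWSYNC is dropped, leaving the leading LWSYNC as the release barrier. In outline:

    // AuxInt != 0: LWSYNC; loop{ LDAR; CMP; BNE; STDCCC; BNE }; LWSYNC; MOVD $1, Rout
    // AuxInt == 0: LWSYNC; loop{ LDAR(EH=0); CMP; BNE; STDCCC; BNE };     MOVD $1, Rout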
@@ -967,7 +982,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 	case ssa.OpPPC64LoweredMove:
 		// This will be used when moving more
-		// than 8 bytes. Moves start with as
+		// than 8 bytes. Moves start with
 		// as many 8 byte moves as possible, then
 		// 4, 2, or 1 byte(s) as remaining. This will
 		// work and be efficient for power8 or later.
@@ -373,6 +373,7 @@ var passes = [...]pass{
 	{name: "phiopt", fn: phiopt},
 	{name: "nilcheckelim", fn: nilcheckelim},
 	{name: "prove", fn: prove},
+	{name: "fuse plain", fn: fusePlain},
 	{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
 	{name: "softfloat", fn: softfloat, required: true},
 	{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
@@ -380,7 +381,7 @@ var passes = [...]pass{
 	{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
 	{name: "check bce", fn: checkbce},
 	{name: "branchelim", fn: branchelim},
-	{name: "fuse", fn: fuse},
+	{name: "fuse", fn: fuseAll},
 	{name: "dse", fn: dse},
 	{name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
 	{name: "insert resched checks", fn: insertLoopReschedChecks,
@@ -178,6 +178,7 @@ type GCNode interface {
 	Typ() *types.Type
 	String() string
 	IsSynthetic() bool
+	IsAutoTmp() bool
 	StorageClass() StorageClass
 }
@@ -153,8 +153,12 @@ var BlockEnd = &Value{
 // RegisterSet is a bitmap of registers, indexed by Register.num.
 type RegisterSet uint64

+// logf prints debug-specific logging to stdout (always stdout) if the current
+// function is tagged by GOSSAFUNC (for ssa output directed either to stdout or html).
 func (s *debugState) logf(msg string, args ...interface{}) {
-	s.f.Logf(msg, args...)
+	if s.f.PrintOrHtmlSSA {
+		fmt.Printf(msg, args...)
+	}
 }

 type debugState struct {
@@ -86,6 +86,10 @@ func (d *DummyAuto) IsSynthetic() bool {
 	return false
 }

+func (d *DummyAuto) IsAutoTmp() bool {
+	return true
+}
+
 func (DummyFrontend) StringData(s string) interface{} {
 	return nil
 }
@@ -163,7 +167,6 @@ func init() {
 	}
 	types.Dowidth = func(t *types.Type) {}
-	types.Tptr = types.TPTR64

 	for _, typ := range [...]struct {
 		width int64
 		et    types.EType
@@ -40,6 +40,7 @@ type Func struct {
 	logfiles       map[string]writeSyncer
 	HTMLWriter     *HTMLWriter // html writer, for debugging
 	DebugTest      bool        // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
+	PrintOrHtmlSSA bool        // true if GOSSAFUNC matches, true even if fe.Log() (spew phase results to stdout) is false.

 	scheduled bool // Values in Blocks are in final order
 	NoSplit   bool // true if function is marked as nosplit. Used by schedule check pass.
@@ -8,17 +8,35 @@ import (
 	"cmd/internal/src"
 )

+// fusePlain runs fuse(f, fuseTypePlain).
+func fusePlain(f *Func) { fuse(f, fuseTypePlain) }
+
+// fuseAll runs fuse(f, fuseTypeAll).
+func fuseAll(f *Func) { fuse(f, fuseTypeAll) }
+
+type fuseType uint8
+
+const (
+	fuseTypePlain fuseType = 1 << iota
+	fuseTypeIf
+	fuseTypeAll = fuseTypePlain | fuseTypeIf
+)
+
 // fuse simplifies control flow by joining basic blocks.
-func fuse(f *Func) {
+func fuse(f *Func, typ fuseType) {
 	for changed := true; changed; {
 		changed = false
 		// Fuse from end to beginning, to avoid quadratic behavior in fuseBlockPlain. See issue 13554.
 		for i := len(f.Blocks) - 1; i >= 0; i-- {
 			b := f.Blocks[i]
-			changed = fuseBlockIf(b) || changed
-			changed = fuseBlockPlain(b) || changed
+			if typ&fuseTypeIf != 0 {
+				changed = fuseBlockIf(b) || changed
+			}
+			if typ&fuseTypePlain != 0 {
+				changed = fuseBlockPlain(b) || changed
+			}
 		}
 	}
 }
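fuse is now a single worklist driver parameterized by a bitmask, which is what lets compile.go (above) schedule a cheap plain-only pass early and the full pass late. Another subset would be one more wrapper, e.g. (hypothetical, not in this change):

    // fuseIf would run only the If-diamond fusing.
    func fuseIf(f *Func) { fuse(f, fuseTypeIf) }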

 // fuseBlockIf handles the following cases where s0 and s1 are empty blocks.
Exit("mem"))) Exit("mem")))
CheckFunc(fun.f) CheckFunc(fun.f)
fuse(fun.f) fuseAll(fun.f)
for _, b := range fun.f.Blocks { for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid { if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@ -56,7 +56,7 @@ func TestFuseEliminatesBothBranches(t *testing.T) {
Exit("mem"))) Exit("mem")))
CheckFunc(fun.f) CheckFunc(fun.f)
fuse(fun.f) fuseAll(fun.f)
for _, b := range fun.f.Blocks { for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid { if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@ -90,7 +90,7 @@ func TestFuseHandlesPhis(t *testing.T) {
Exit("mem"))) Exit("mem")))
CheckFunc(fun.f) CheckFunc(fun.f)
fuse(fun.f) fuseAll(fun.f)
for _, b := range fun.f.Blocks { for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid { if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@ -122,7 +122,7 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) {
)) ))
CheckFunc(fun.f) CheckFunc(fun.f)
fuse(fun.f) fuseAll(fun.f)
for k, b := range fun.blocks { for k, b := range fun.blocks {
if k[:1] == "z" && b.Kind != BlockInvalid { if k[:1] == "z" && b.Kind != BlockInvalid {
@ -162,7 +162,7 @@ func BenchmarkFuse(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
fun := c.Fun("entry", blocks...) fun := c.Fun("entry", blocks...)
fuse(fun.f) fuseAll(fun.f)
} }
}) })
} }
@@ -17,14 +17,17 @@
 (Mul(32|64)F x y) -> (MULS(S|D) x y)
 (Mul32uhilo x y) -> (MULLQU x y)

+(Select0 (Mul32uover x y)) -> (Select0 <typ.UInt32> (MULLU x y))
+(Select1 (Mul32uover x y)) -> (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+
 (Avg32u x y) -> (AVGLU x y)

 (Div32F x y) -> (DIVSS x y)
 (Div64F x y) -> (DIVSD x y)
-(Div32 x y) -> (DIVL x y)
+(Div32 [a] x y) -> (DIVL [a] x y)
 (Div32u x y) -> (DIVLU x y)
-(Div16 x y) -> (DIVW x y)
+(Div16 [a] x y) -> (DIVW [a] x y)
 (Div16u x y) -> (DIVWU x y)
 (Div8 x y) -> (DIVW (SignExt8to16 x) (SignExt8to16 y))
 (Div8u x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
@@ -32,9 +35,9 @@
 (Hmul32 x y) -> (HMULL x y)
 (Hmul32u x y) -> (HMULLU x y)
-(Mod32 x y) -> (MODL x y)
+(Mod32 [a] x y) -> (MODL [a] x y)
 (Mod32u x y) -> (MODLU x y)
-(Mod16 x y) -> (MODW x y)
+(Mod16 [a] x y) -> (MODW [a] x y)
 (Mod16u x y) -> (MODWU x y)
 (Mod8 x y) -> (MODW (SignExt8to16 x) (SignExt8to16 y))
 (Mod8u x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
@@ -369,6 +372,7 @@
 (If (SETBE cmp) yes no) -> (ULE cmp yes no)
 (If (SETA cmp) yes no) -> (UGT cmp yes no)
 (If (SETAE cmp) yes no) -> (UGE cmp yes no)
+(If (SETO cmp) yes no) -> (OS cmp yes no)

 // Special case for floating point - LF/LEF not generated
 (If (SETGF cmp) yes no) -> (UGT cmp yes no)
@@ -398,6 +402,7 @@
 (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
 (NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no)
 (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) -> (OS cmp yes no)

 // Special case for floating point - LF/LEF not generated
 (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no)
@@ -614,38 +619,39 @@
 (MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)

 // Don't extend before storing
-(MOVWstore [off] {sym} ptr (MOVWLSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVBLSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVWstore [off] {sym} ptr (MOVWLZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVBLZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWL(S|Z)X x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBL(S|Z)X x) mem) -> (MOVBstore [off] {sym} ptr x mem)
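The (S|Z) alternation is rules-generator shorthand: one line expands to one concrete rule per alternative, which is why the explicit MOVWLZX/MOVBLZX variants above disappear. The first new rule expands back to exactly the two it replaces:

    (MOVWstore [off] {sym} ptr (MOVWLSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
    (MOVWstore [off] {sym} ptr (MOVWLZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)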
 // fold constants into memory operations
 // Note that this is not always a good idea because if not all the uses of
 // the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
 // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
 // Nevertheless, let's do it!
-(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
-(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
-(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
-(MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
-(MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
-(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
-(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
-(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
-(MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
-(MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
+(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem)
+(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem)
 ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
 	((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) && is32Bit(off1+off2) ->
+	((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {sym} val base idx mem)
+((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) && is32Bit(off1+off2*4) ->
+	((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2*4] {sym} val base idx mem)
 ((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
 	((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
 ((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
 	((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
 ((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(off1+off2) ->
 	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) && is32Bit(off1+off2) ->
+	((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2] {sym} base idx val mem)
+((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) && is32Bit(off1+off2*4) ->
+	((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2*4] {sym} base idx val mem)
 ((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
 	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) && ValAndOff(valoff1).canAdd(off2) ->
+	((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
+((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) && ValAndOff(valoff1).canAdd(off2*4) ->
+	((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)

 // Fold constants into stores.
 (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
@@ -656,12 +662,8 @@
 	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)

 // Fold address offsets into constant stores.
-(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
-	(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
-	(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
-	(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+	(MOV(L|W|B)storeconst [ValAndOff(sc).add(off)] {s} ptr mem)

 // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
 // what variables are being read/written by the ops.
@@ -671,97 +673,43 @@
 // a separate instruction gives us that register. Having the LEAL be
 // a separate instruction also allows it to be CSEd (which is good because
 // it compiles to a thunk call).
-(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
-	(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+	(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	&& (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+	(MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
+	(MOV(L|W|B)storeconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)

 // generating indexed loads and stores
-(MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOV(B|W|L|SS|SD)load [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+	(MOV(B|W|L|SS|SD)loadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
 (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
 	(MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOV(L|SS)load [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+	(MOV(L|SS)loadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
 (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
 	(MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOV(B|W|L|SS|SD)store [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+	(MOV(B|W|L|SS|SD)storeidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
 (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
 	(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOV(L|SS)store [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+	(MOV(L|SS)storeidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
 (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
 	(MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
 ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
 	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
 	((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+	((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
 ((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
 	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
 	((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
@@ -771,35 +719,27 @@
 ((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
 	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
 	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
+	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+	((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
 ((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
 	&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
 	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
(MOVBload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem) (MOV(B|W|L|SS|SD)load [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOV(B|W|L|SS|SD)loadidx1 [off] {sym} ptr idx mem)
(MOVWload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem) (MOV(B|W|L|SS|SD)store [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOV(B|W|L|SS|SD)storeidx1 [off] {sym} ptr idx val mem)
(MOVLload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
(MOVSSload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
(MOVSDload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
(MOVBstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
(MOVWstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
(MOVLstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
(MOVSSstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
(MOVSDstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
(MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> (MOV(B|W|L)storeconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem) (MOV(B|W|L)storeconst [x] {sym} (ADDL ptr idx) mem) -> (MOV(B|W|L)storeconstidx1 [x] {sym} ptr idx mem)
(MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
(MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
// combine SHLL into indexed loads and stores // combine SHLL into indexed loads and stores
(MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem) (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
@ -810,76 +750,64 @@
(MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem) (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
// combine ADDL into indexed loads and stores // combine ADDL into indexed loads and stores
(MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) (MOV(B|W|L|SS|SD)loadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOV(B|W|L|SS|SD)loadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem) (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) (MOV(L|SS)loadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOV(L|SS)loadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem) (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) (MOV(B|W|L|SS|SD)storeidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOV(B|W|L|SS|SD)storeidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem) (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) (MOV(L|SS)storeidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOV(L|SS)storeidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem) (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) (MOV(B|W|L|SS|SD)loadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOV(B|W|L|SS|SD)loadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2 [int64(int32(c+2*d))] {sym} ptr idx mem) (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2 [int64(int32(c+2*d))] {sym} ptr idx mem)
(MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) (MOV(L|SS)loadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOV(L|SS)loadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
(MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
(MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
(MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
(MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [int64(int32(c+8*d))] {sym} ptr idx mem) (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [int64(int32(c+8*d))] {sym} ptr idx mem)
(MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) (MOV(B|W|L|SS|SD)storeidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOV(B|W|L|SS|SD)storeidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2 [int64(int32(c+2*d))] {sym} ptr idx val mem) (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2 [int64(int32(c+2*d))] {sym} ptr idx val mem)
(MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) (MOV(L|SS)storeidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOV(L|SS)storeidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
(MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
(MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
(MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
(MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem) (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem)
// Merge load/store to op // Merge load/store to op
((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem) ((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) && canMergeLoad(v, l, x) && clobber(l) ->
((ADD|AND|OR|XOR|SUB|MUL)Lloadidx4 x [off] {sym} ptr idx mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) ((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) ((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) -> (MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|AND|OR|XOR)Lloadidx4 x [off] {sym} ptr idx mem) mem) && y.Uses==1 && clobber(y) ->
((ADD|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx x mem)
(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|SUB|AND|OR|XOR)L l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx x mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
&& y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) -> && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) ->
((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem) ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
&& y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) ->
((ADD|AND|OR|XOR)Lconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
((ADD|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) && validValAndOff(c,off) ->
((ADD|AND|OR|XOR)Lconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
(SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) && validValAndOff(-c,off) ->
(ADDLconstmodifyidx4 [makeValAndOff(-c,off)] {sym} ptr idx mem)
(MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) -> (MOV(B|W|L)storeconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) -> (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) -> (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) -> (MOV(B|W|L)storeconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) -> (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
(MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
(MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) -> (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
@ -1199,11 +1127,21 @@
&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
&& clobber(x) && clobber(x)
-> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1 && x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x) && clobber(x)
-> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
(MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
&& x.Uses == 1 && x.Uses == 1
@ -1223,10 +1161,14 @@
-> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem) -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem)
// Combine stores into larger (unaligned) stores. // Combine stores into larger (unaligned) stores.
(MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) (MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
&& x.Uses == 1 && x.Uses == 1
&& clobber(x) && clobber(x)
-> (MOVWstore [i-1] {s} p w mem) -> (MOVWstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHR(W|L)const [8] w) mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVWstore [i] {s} p w mem)
(MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
&& x.Uses == 1 && x.Uses == 1
&& clobber(x) && clobber(x)
@ -1240,10 +1182,14 @@
&& clobber(x) && clobber(x)
-> (MOVLstore [i-2] {s} p w0 mem) -> (MOVLstore [i-2] {s} p w0 mem)
(MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) (MOVBstoreidx1 [i] {s} p idx (SHR(L|W)const [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
&& x.Uses == 1 && x.Uses == 1
&& clobber(x) && clobber(x)
-> (MOVWstoreidx1 [i-1] {s} p idx w mem) -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
(MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHR(L|W)const [8] w) mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVWstoreidx1 [i] {s} p idx w mem)
(MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
&& x.Uses == 1 && x.Uses == 1
&& clobber(x) && clobber(x)
@ -1283,3 +1229,7 @@
(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int32(c)),off) -> (CMPLconstload {sym} [makeValAndOff(int64(int32(c)),off)] ptr mem) (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int32(c)),off) -> (CMPLconstload {sym} [makeValAndOff(int64(int32(c)),off)] ptr mem)
(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),off) -> (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem) (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),off) -> (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),off) -> (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem) (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),off) -> (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read8(sym, off))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read16(sym, off, config.BigEndian))])
(MOVLload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(int32(read32(sym, off, config.BigEndian)))])

View file

@ -129,6 +129,7 @@ func init() {
gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly} gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly} gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly} gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly}
gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}} gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}}
@ -206,6 +207,8 @@ func init() {
{name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
{name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
{name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
{name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width {name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width {name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
@ -213,13 +216,14 @@ func init() {
{name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits {name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits
{name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", clobberFlags: true}, // arg0 / arg1 // For DIVL, DIVW, MODL and MODW, AuxInt non-zero means that the divisor has been proved to be not -1.
{name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", clobberFlags: true}, // arg0 / arg1 {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 / arg1
{name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 / arg1
{name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1 {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1
{name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1 {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1
{name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", clobberFlags: true}, // arg0 % arg1 {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 % arg1
{name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", clobberFlags: true}, // arg0 % arg1 {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 % arg1
{name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1 {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1
{name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1 {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1
@ -281,6 +285,7 @@ func init() {
{name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15 {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
{name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7 {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
// binary-op with a memory source operand
{name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "MULLload", argLength: 3, reg: gp21load, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "MULLload", argLength: 3, reg: gp21load, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
@ -288,6 +293,14 @@ func init() {
{name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
// binary-op with an indexed memory source operand
{name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
{name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
{name: "MULLloadidx4", argLength: 4, reg: gp21loadidx, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
{name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
{name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
{name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
// unary ops // unary ops
{name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0 {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
@ -316,6 +329,7 @@ func init() {
{name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0 {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
{name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0 {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
{name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0 {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
{name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
// Need different opcodes for floating point conditions because // Need different opcodes for floating point conditions because
// any comparison involving a NaN is always FALSE and thus // any comparison involving a NaN is always FALSE and thus
// the patterns for inverting conditions cannot be used. // the patterns for inverting conditions cannot be used.
@ -367,12 +381,25 @@ func init() {
{name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem {name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
{name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem {name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
// direct binary-op on indexed memory (read-modify-write)
{name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) += arg2, arg3=mem
{name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) -= arg2, arg3=mem
{name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) &= arg2, arg3=mem
{name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) |= arg2, arg3=mem
{name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) ^= arg2, arg3=mem
// direct binary-op on memory with a constant (read-modify-write) // direct binary-op on memory with a constant (read-modify-write)
{name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
// direct binary-op on indexed memory with a constant (read-modify-write)
{name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
{name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
{name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
{name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
// indexed loads/stores // indexed loads/stores
{name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
{name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
@ -530,6 +557,8 @@ func init() {
{name: "LE"}, {name: "LE"},
{name: "GT"}, {name: "GT"},
{name: "GE"}, {name: "GE"},
{name: "OS"},
{name: "OC"},
{name: "ULT"}, {name: "ULT"},
{name: "ULE"}, {name: "ULE"},
{name: "UGT"}, {name: "UGT"},

View file

@ -16,10 +16,14 @@
(Mul(64|32|16|8) x y) -> (MUL(Q|L|L|L) x y) (Mul(64|32|16|8) x y) -> (MUL(Q|L|L|L) x y)
(Mul(32|64)F x y) -> (MULS(S|D) x y) (Mul(32|64)F x y) -> (MULS(S|D) x y)
(Select0 (Mul64uover x y)) -> (Select0 <typ.UInt64> (MULQU x y))
(Select0 (Mul32uover x y)) -> (Select0 <typ.UInt32> (MULLU x y))
(Select1 (Mul(64|32)uover x y)) -> (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))
(Hmul(64|32) x y) -> (HMUL(Q|L) x y) (Hmul(64|32) x y) -> (HMUL(Q|L) x y)
(Hmul(64|32)u x y) -> (HMUL(Q|L)U x y) (Hmul(64|32)u x y) -> (HMUL(Q|L)U x y)
(Div(64|32|16) x y) -> (Select0 (DIV(Q|L|W) x y)) (Div(64|32|16) [a] x y) -> (Select0 (DIV(Q|L|W) [a] x y))
(Div8 x y) -> (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) (Div8 x y) -> (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Div(64|32|16)u x y) -> (Select0 (DIV(Q|L|W)U x y)) (Div(64|32|16)u x y) -> (Select0 (DIV(Q|L|W)U x y))
(Div8u x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) (Div8u x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
@ -30,7 +34,7 @@
(Avg64u x y) -> (AVGQU x y) (Avg64u x y) -> (AVGQU x y)
(Mod(64|32|16) x y) -> (Select1 (DIV(Q|L|W) x y)) (Mod(64|32|16) [a] x y) -> (Select1 (DIV(Q|L|W) [a] x y))
(Mod8 x y) -> (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) (Mod8 x y) -> (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Mod(64|32|16)u x y) -> (Select1 (DIV(Q|L|W)U x y)) (Mod(64|32|16)u x y) -> (Select1 (DIV(Q|L|W)U x y))
(Mod8u x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) (Mod8u x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
@ -480,6 +484,7 @@
(If (SETBE cmp) yes no) -> (ULE cmp yes no) (If (SETBE cmp) yes no) -> (ULE cmp yes no)
(If (SETA cmp) yes no) -> (UGT cmp yes no) (If (SETA cmp) yes no) -> (UGT cmp yes no)
(If (SETAE cmp) yes no) -> (UGE cmp yes no) (If (SETAE cmp) yes no) -> (UGE cmp yes no)
(If (SETO cmp) yes no) -> (OS cmp yes no)
// Special case for floating point - LF/LEF not generated // Special case for floating point - LF/LEF not generated
(If (SETGF cmp) yes no) -> (UGT cmp yes no) (If (SETGF cmp) yes no) -> (UGT cmp yes no)
@ -542,6 +547,7 @@
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no) (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
(NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no) (NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no) (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
(NE (TESTB (SETO cmp) (SETO cmp)) yes no) -> (OS cmp yes no)
// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded // Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
// Note that BTx instructions use the carry bit, so we need to convert tests for zero flag // Note that BTx instructions use the carry bit, so we need to convert tests for zero flag
@ -1073,11 +1079,11 @@
// Fold constants into stores. // Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> (MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) ->
(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> (MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) ->
(MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> (MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) && validOff(off) ->
(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
// Fold address offsets into constant stores. // Fold address offsets into constant stores.
@ -2119,16 +2125,31 @@
&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
&& clobber(x) && clobber(x)
-> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1 && x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x) && clobber(x)
-> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
&& x.Uses == 1 && x.Uses == 1
&& ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
&& clobber(x) && clobber(x)
-> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
(MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
(MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
&& config.useSSE && config.useSSE
&& x.Uses == 1 && x.Uses == 1
@ -2170,6 +2191,10 @@
&& x.Uses == 1 && x.Uses == 1
&& clobber(x) && clobber(x)
-> (MOVWstore [i-1] {s} p w mem) -> (MOVWstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHR(W|L|Q)const [8] w) mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVWstore [i] {s} p w mem)
(MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem)) (MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem))
&& x.Uses == 1 && x.Uses == 1
&& clobber(x) && clobber(x)
@ -2487,3 +2512,8 @@
&& validValAndOff(0,off) && validValAndOff(0,off)
&& clobber(l) -> && clobber(l) ->
@l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0,off)] ptr mem) @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0,off)] ptr mem)
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read8(sym, off))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read16(sym, off, config.BigEndian))])
(MOVLload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read32(sym, off, config.BigEndian))])
(MOVQload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read64(sym, off, config.BigEndian))])

View file

@ -210,6 +210,9 @@ func init() {
{name: "MULQconst", argLength: 1, reg: gp11, asm: "IMUL3Q", aux: "Int32", clobberFlags: true}, // arg0 * auxint {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMUL3Q", aux: "Int32", clobberFlags: true}, // arg0 * auxint
{name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
{name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
{name: "MULQU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt64,Flags)", asm: "MULQ", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply). Returns uint64(x), and flags set to overflow if uint64(x) != x.
{name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width {name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width {name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
{name: "HMULQU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width {name: "HMULQU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width
@ -217,9 +220,11 @@ func init() {
{name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits
{name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] // For DIVQ, DIVL and DIVW, AuxInt non-zero means that the divisor has been proved to be not -1.
{name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] {name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
{name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
@ -468,6 +473,7 @@ func init() {
{name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0 {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
{name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0 {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
{name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0 {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
{name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
// Variants that store result to memory // Variants that store result to memory
{name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem {name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
{name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem {name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
@ -754,6 +760,8 @@ func init() {
{name: "LE"}, {name: "LE"},
{name: "GT"}, {name: "GT"},
{name: "GE"}, {name: "GE"},
{name: "OS"},
{name: "OC"},
{name: "ULT"}, {name: "ULT"},
{name: "ULE"}, {name: "ULE"},
{name: "UGT"}, {name: "UGT"},

View file

@ -1544,3 +1544,7 @@
(GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftLLreg x y z) yes no) (GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftLLreg x y z) yes no)
(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftRLreg x y z) yes no) (GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftRLreg x y z) yes no)
(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftRAreg x y z) yes no) (GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 -> (GE (TEQshiftRAreg x y z) yes no)
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read8(sym, off))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read16(sym, off, config.BigEndian))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(int32(read32(sym, off, config.BigEndian)))])

View file

@ -2957,3 +2957,8 @@
(FSUBD a (FNMULD x y)) -> (FMADDD a x y) (FSUBD a (FNMULD x y)) -> (FMADDD a x y)
(FSUBS (FNMULS x y) a) -> (FNMADDS a x y) (FSUBS (FNMULS x y) a) -> (FNMADDS a x y)
(FSUBD (FNMULD x y) a) -> (FNMADDD a x y) (FSUBD (FNMULD x y) a) -> (FNMADDD a x y)
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read8(sym, off))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read16(sym, off, config.BigEndian))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read32(sym, off, config.BigEndian))])
(MOVDload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read64(sym, off, config.BigEndian))])

View file

@ -297,6 +297,8 @@
(Ctz64 x) -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x)) (Ctz64 x) -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
(Ctz32 x) -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x))) (Ctz32 x) -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
(Ctz16 x) -> (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
(Ctz8 x) -> (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
(BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x)) (BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
(BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x)) (BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))
@ -304,7 +306,7 @@
(PopCount64 x) -> (POPCNTD x) (PopCount64 x) -> (POPCNTD x)
(PopCount32 x) -> (POPCNTW (MOVWZreg x)) (PopCount32 x) -> (POPCNTW (MOVWZreg x))
(PopCount16 x) -> (POPCNTW (MOVHZreg x)) (PopCount16 x) -> (POPCNTW (MOVHZreg x))
(PopCount8 x) -> (POPCNTB (MOVBreg x)) (PopCount8 x) -> (POPCNTB (MOVBZreg x))
(And(64|32|16|8) x y) -> (AND x y) (And(64|32|16|8) x y) -> (AND x y)
(Or(64|32|16|8) x y) -> (OR x y) (Or(64|32|16|8) x y) -> (OR x y)
@ -894,16 +896,19 @@
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
// atomic intrinsics // atomic intrinsics
(AtomicLoad(32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(32|64|Ptr) ptr mem) (AtomicLoad(32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(32|64|Ptr) [1] ptr mem)
(AtomicLoadAcq32 ptr mem) -> (LoweredAtomicLoad32 [0] ptr mem)
(AtomicStore(32|64) ptr val mem) -> (LoweredAtomicStore(32|64) ptr val mem) (AtomicStore(32|64) ptr val mem) -> (LoweredAtomicStore(32|64) [1] ptr val mem)
(AtomicStoreRel32 ptr val mem) -> (LoweredAtomicStore32 [0] ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) -> (STLR ptr val mem) //(AtomicStorePtrNoWB ptr val mem) -> (STLR ptr val mem)
(AtomicExchange(32|64) ptr val mem) -> (LoweredAtomicExchange(32|64) ptr val mem) (AtomicExchange(32|64) ptr val mem) -> (LoweredAtomicExchange(32|64) ptr val mem)
(AtomicAdd(32|64) ptr val mem) -> (LoweredAtomicAdd(32|64) ptr val mem) (AtomicAdd(32|64) ptr val mem) -> (LoweredAtomicAdd(32|64) ptr val mem)
(AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) ptr old new_ mem) (AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
(AtomicCompareAndSwapRel32 ptr old new_ mem) -> (LoweredAtomicCas32 [0] ptr old new_ mem)
(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem) (AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8 ptr val mem) -> (LoweredAtomicOr8 ptr val mem) (AtomicOr8 ptr val mem) -> (LoweredAtomicOr8 ptr val mem)
@ -956,7 +961,7 @@
(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))]) (MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
// Lose widening ops fed to to stores // Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem) (MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstore [off] {sym} ptr x mem) (MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem) (MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

View file

@ -470,12 +470,12 @@ func init() {
faultOnNilArg1: true, faultOnNilArg1: true,
}, },
{name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
{name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
{name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
// atomic add32, 64 // atomic add32, 64
// SYNC // SYNC
@ -516,8 +516,8 @@ func init() {
// BNE -4(PC) // BNE -4(PC)
// CBNZ Rtmp, -4(PC) // CBNZ Rtmp, -4(PC)
// CSET EQ, Rout // CSET EQ, Rout
{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
// atomic 8 and/or. // atomic 8 and/or.
// *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero. // *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.

View file

@ -816,7 +816,7 @@
// Decomposing StringMake and lowering of StringPtr and StringLen // Decomposing StringMake and lowering of StringPtr and StringLen
// happens in a later pass, dec, so that these operations are available // happens in a later pass, dec, so that these operations are available
// to other passes for optimizations. // to other passes for optimizations.
(StringPtr (StringMake (Const64 <t> [c]) _)) -> (Const64 <t> [c]) (StringPtr (StringMake (Addr <t> {s} base) _)) -> (Addr <t> {s} base)
(StringLen (StringMake _ (Const64 <t> [c]))) -> (Const64 <t> [c]) (StringLen (StringMake _ (Const64 <t> [c]))) -> (Const64 <t> [c])
(ConstString {s}) && config.PtrSize == 4 && s.(string) == "" -> (ConstString {s}) && config.PtrSize == 4 && s.(string) == "" ->
(StringMake (ConstNil) (Const32 <typ.Int> [0])) (StringMake (ConstNil) (Const32 <typ.Int> [0]))
@ -1799,3 +1799,17 @@
(Zero {t1} [n] dst mem))))) (Zero {t1} [n] dst mem)))))
(StaticCall {sym} x) && needRaceCleanup(sym,v) -> x (StaticCall {sym} x) && needRaceCleanup(sym,v) -> x
// Collapse moving A -> B -> C into just A -> C.
// Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.
// This happens most commonly when B is an autotmp inserted earlier
// during compilation to ensure correctness.
(Move {t1} [s1] dst tmp1 midmem:(Move {t2} [s2] tmp2 src _))
&& s1 == s2
&& t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq
&& isSamePtr(tmp1, tmp2)
-> (Move {t1} [s1] dst src midmem)
// Elide self-moves. This only happens rarely (e.g. test/fixedbugs/bug277.go).
// However, this rule is needed to prevent the previous rule from looping forever in such cases.
(Move dst src mem) && isSamePtr(dst, src) -> mem
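An illustrative sketch (ordinary slices standing in for SSA memory states) of why the A -> B -> C collapse is sound when the sizes and types match and tmp is the same pointer in both moves:

package main

import "fmt"

func main() {
	src := []byte("hello")
	tmp := make([]byte, len(src))
	dst1 := make([]byte, len(src))
	dst2 := make([]byte, len(src))

	copy(tmp, src)  // Move tmp <- src (the midmem step)
	copy(dst1, tmp) // Move dst <- tmp

	copy(dst2, src) // collapsed: Move dst <- src directly

	fmt.Println(string(dst1) == string(dst2)) // true
}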

View file

@ -55,6 +55,9 @@ var genericOps = []opData{
{name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo) {name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo)
{name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo) {name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo)
{name: "Mul32uover", argLength: 2, typ: "(UInt32,Bool)", commutative: true}, // Let x = arg0*arg1 (full 32x32-> 64 unsigned multiply), returns (uint32(x), (uint32(x) != x))
{name: "Mul64uover", argLength: 2, typ: "(UInt64,Bool)", commutative: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply), returns (uint64(x), (uint64(x) != x))
// Weird special instructions for use in the strength reduction of divides. // Weird special instructions for use in the strength reduction of divides.
// These ops compute unsigned (arg0 + arg1) / 2, correct to all // These ops compute unsigned (arg0 + arg1) / 2, correct to all
// 32/64 bits, even when the intermediate result of the add has 33/65 bits. // 32/64 bits, even when the intermediate result of the add has 33/65 bits.
@ -63,23 +66,26 @@ var genericOps = []opData{
{name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only {name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only
{name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only {name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only
// For Div16, Div32 and Div64, AuxInt non-zero means that the divisor has been proved to be not -1
// or that the dividend is not the most negative value.
{name: "Div8", argLength: 2}, // arg0 / arg1, signed {name: "Div8", argLength: 2}, // arg0 / arg1, signed
{name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned {name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned
{name: "Div16", argLength: 2}, {name: "Div16", argLength: 2, aux: "Bool"},
{name: "Div16u", argLength: 2}, {name: "Div16u", argLength: 2},
{name: "Div32", argLength: 2}, {name: "Div32", argLength: 2, aux: "Bool"},
{name: "Div32u", argLength: 2}, {name: "Div32u", argLength: 2},
{name: "Div64", argLength: 2}, {name: "Div64", argLength: 2, aux: "Bool"},
{name: "Div64u", argLength: 2}, {name: "Div64u", argLength: 2},
{name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r) {name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
// For Mod16, Mod32 and Mod64, AuxInt non-zero means that the divisor has been proved to be not -1.
{name: "Mod8", argLength: 2}, // arg0 % arg1, signed {name: "Mod8", argLength: 2}, // arg0 % arg1, signed
{name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned {name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned
{name: "Mod16", argLength: 2}, {name: "Mod16", argLength: 2, aux: "Bool"},
{name: "Mod16u", argLength: 2}, {name: "Mod16u", argLength: 2},
{name: "Mod32", argLength: 2}, {name: "Mod32", argLength: 2, aux: "Bool"},
{name: "Mod32u", argLength: 2}, {name: "Mod32u", argLength: 2},
{name: "Mod64", argLength: 2}, {name: "Mod64", argLength: 2, aux: "Bool"},
{name: "Mod64u", argLength: 2}, {name: "Mod64u", argLength: 2},
{name: "And8", argLength: 2, commutative: true}, // arg0 & arg1 {name: "And8", argLength: 2, commutative: true}, // arg0 & arg1
@ -509,15 +515,18 @@ var genericOps = []opData{
{name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory. {name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory. {name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory. {name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
{name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory. {name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory. {name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory. {name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
{name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory. {name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory. {name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory. {name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
{name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory. {name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
{name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true iff store happens and new memory. {name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
{name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true iff store happens and new memory. {name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
{name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, returns true if store happens and new memory.
{name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory. {name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
{name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory. {name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.

View file

@ -109,6 +109,12 @@ code, pre, .lines, .ast {
font-size: 12px; font-size: 12px;
} }
pre {
-moz-tab-size: 4;
-o-tab-size: 4;
tab-size: 4;
}
.allow-x-scroll { .allow-x-scroll {
overflow-x: scroll; overflow-x: scroll;
} }

View file

@ -12,7 +12,7 @@ func layout(f *Func) {
} }
// Register allocation may use a different order which has constraints // Register allocation may use a different order which has constraints
// imposed by the linear-scan algorithm. Note that that f.pass here is // imposed by the linear-scan algorithm. Note that f.pass here is
// regalloc, so the switch is conditional on -d=ssa/regalloc/test=N // regalloc, so the switch is conditional on -d=ssa/regalloc/test=N
func layoutRegallocOrder(f *Func) []*Block { func layoutRegallocOrder(f *Func) []*Block {

View file

@ -87,7 +87,7 @@ func TestNilcheckSimple(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)
@ -124,7 +124,7 @@ func TestNilcheckDomOrder(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)
@ -157,7 +157,7 @@ func TestNilcheckAddr(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)
@ -191,7 +191,7 @@ func TestNilcheckAddPtr(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)
@ -235,7 +235,7 @@ func TestNilcheckPhi(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)
@ -276,7 +276,7 @@ func TestNilcheckKeepRemove(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)
@ -323,7 +323,7 @@ func TestNilcheckInFalseBranch(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)
@ -374,7 +374,7 @@ func TestNilcheckUser(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)
@ -418,7 +418,7 @@ func TestNilcheckBug(t *testing.T) {
nilcheckelim(fun.f) nilcheckelim(fun.f)
// clean up the removed nil check // clean up the removed nil check
fuse(fun.f) fusePlain(fun.f)
deadcode(fun.f) deadcode(fun.f)
CheckFunc(fun.f) CheckFunc(fun.f)

View file

@ -22,6 +22,8 @@ const (
Block386LE Block386LE
Block386GT Block386GT
Block386GE Block386GE
Block386OS
Block386OC
Block386ULT Block386ULT
Block386ULE Block386ULE
Block386UGT Block386UGT
@ -37,6 +39,8 @@ const (
BlockAMD64LE BlockAMD64LE
BlockAMD64GT BlockAMD64GT
BlockAMD64GE BlockAMD64GE
BlockAMD64OS
BlockAMD64OC
BlockAMD64ULT BlockAMD64ULT
BlockAMD64ULE BlockAMD64ULE
BlockAMD64UGT BlockAMD64UGT
@ -130,6 +134,8 @@ var blockString = [...]string{
Block386LE: "LE", Block386LE: "LE",
Block386GT: "GT", Block386GT: "GT",
Block386GE: "GE", Block386GE: "GE",
Block386OS: "OS",
Block386OC: "OC",
Block386ULT: "ULT", Block386ULT: "ULT",
Block386ULE: "ULE", Block386ULE: "ULE",
Block386UGT: "UGT", Block386UGT: "UGT",
@ -145,6 +151,8 @@ var blockString = [...]string{
BlockAMD64LE: "LE", BlockAMD64LE: "LE",
BlockAMD64GT: "GT", BlockAMD64GT: "GT",
BlockAMD64GE: "GE", BlockAMD64GE: "GE",
BlockAMD64OS: "OS",
BlockAMD64OC: "OC",
BlockAMD64ULT: "ULT", BlockAMD64ULT: "ULT",
BlockAMD64ULE: "ULE", BlockAMD64ULE: "ULE",
BlockAMD64UGT: "UGT", BlockAMD64UGT: "UGT",
@ -278,6 +286,7 @@ const (
Op386SBBLconst Op386SBBLconst
Op386MULL Op386MULL
Op386MULLconst Op386MULLconst
Op386MULLU
Op386HMULL Op386HMULL
Op386HMULLU Op386HMULLU
Op386MULLQU Op386MULLQU
@ -339,6 +348,12 @@ const (
Op386ANDLload Op386ANDLload
Op386ORLload Op386ORLload
Op386XORLload Op386XORLload
Op386ADDLloadidx4
Op386SUBLloadidx4
Op386MULLloadidx4
Op386ANDLloadidx4
Op386ORLloadidx4
Op386XORLloadidx4
Op386NEGL Op386NEGL
Op386NOTL Op386NOTL
Op386BSFL Op386BSFL
@ -358,6 +373,7 @@ const (
Op386SETBE Op386SETBE
Op386SETA Op386SETA
Op386SETAE Op386SETAE
Op386SETO
Op386SETEQF Op386SETEQF
Op386SETNEF Op386SETNEF
Op386SETORD Op386SETORD
@ -394,10 +410,19 @@ const (
Op386ANDLmodify Op386ANDLmodify
Op386ORLmodify Op386ORLmodify
Op386XORLmodify Op386XORLmodify
Op386ADDLmodifyidx4
Op386SUBLmodifyidx4
Op386ANDLmodifyidx4
Op386ORLmodifyidx4
Op386XORLmodifyidx4
Op386ADDLconstmodify Op386ADDLconstmodify
Op386ANDLconstmodify Op386ANDLconstmodify
Op386ORLconstmodify Op386ORLconstmodify
Op386XORLconstmodify Op386XORLconstmodify
Op386ADDLconstmodifyidx4
Op386ANDLconstmodifyidx4
Op386ORLconstmodifyidx4
Op386XORLconstmodifyidx4
Op386MOVBloadidx1 Op386MOVBloadidx1
Op386MOVWloadidx1 Op386MOVWloadidx1
Op386MOVWloadidx2 Op386MOVWloadidx2
@ -485,6 +510,8 @@ const (
OpAMD64MULL OpAMD64MULL
OpAMD64MULQconst OpAMD64MULQconst
OpAMD64MULLconst OpAMD64MULLconst
OpAMD64MULLU
OpAMD64MULQU
OpAMD64HMULQ OpAMD64HMULQ
OpAMD64HMULL OpAMD64HMULL
OpAMD64HMULQU OpAMD64HMULQU
@ -690,6 +717,7 @@ const (
OpAMD64SETBE OpAMD64SETBE
OpAMD64SETA OpAMD64SETA
OpAMD64SETAE OpAMD64SETAE
OpAMD64SETO
OpAMD64SETEQstore OpAMD64SETEQstore
OpAMD64SETNEstore OpAMD64SETNEstore
OpAMD64SETLstore OpAMD64SETLstore
@ -2068,6 +2096,8 @@ const (
OpHmul64u OpHmul64u
OpMul32uhilo OpMul32uhilo
OpMul64uhilo OpMul64uhilo
OpMul32uover
OpMul64uover
OpAvg32u OpAvg32u
OpAvg64u OpAvg64u
OpDiv8 OpDiv8
@ -2379,15 +2409,18 @@ const (
OpAtomicLoad32 OpAtomicLoad32
OpAtomicLoad64 OpAtomicLoad64
OpAtomicLoadPtr OpAtomicLoadPtr
OpAtomicLoadAcq32
OpAtomicStore32 OpAtomicStore32
OpAtomicStore64 OpAtomicStore64
OpAtomicStorePtrNoWB OpAtomicStorePtrNoWB
OpAtomicStoreRel32
OpAtomicExchange32 OpAtomicExchange32
OpAtomicExchange64 OpAtomicExchange64
OpAtomicAdd32 OpAtomicAdd32
OpAtomicAdd64 OpAtomicAdd64
OpAtomicCompareAndSwap32 OpAtomicCompareAndSwap32
OpAtomicCompareAndSwap64 OpAtomicCompareAndSwap64
OpAtomicCompareAndSwapRel32
OpAtomicAnd8 OpAtomicAnd8
OpAtomicOr8 OpAtomicOr8
OpAtomicAdd32Variant OpAtomicAdd32Variant
@ -3099,6 +3132,24 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "MULLU",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: x86.AMULL,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 255}, // AX CX DX BX SP BP SI DI
},
clobbers: 4, // DX
outputs: []outputInfo{
{1, 0},
{0, 1}, // AX
},
},
},
{ {
name: "HMULL", name: "HMULL",
argLen: 2, argLen: 2,
@ -3168,6 +3219,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "DIVL", name: "DIVL",
auxType: auxBool,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
asm: x86.AIDIVL, asm: x86.AIDIVL,
@ -3184,6 +3236,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "DIVW", name: "DIVW",
auxType: auxBool,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
asm: x86.AIDIVW, asm: x86.AIDIVW,
@ -3232,6 +3285,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "MODL", name: "MODL",
auxType: auxBool,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
asm: x86.AIDIVL, asm: x86.AIDIVL,
@ -3248,6 +3302,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "MODW", name: "MODW",
auxType: auxBool,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
asm: x86.AIDIVW, asm: x86.AIDIVW,
@ -4019,6 +4074,126 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "ADDLloadidx4",
auxType: auxSymOff,
argLen: 4,
resultInArg0: true,
clobberFlags: true,
faultOnNilArg1: true,
symEffect: SymRead,
asm: x86.AADDL,
reg: regInfo{
inputs: []inputInfo{
{0, 239}, // AX CX DX BX BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{1, 65791}, // AX CX DX BX SP BP SI DI SB
},
outputs: []outputInfo{
{0, 239}, // AX CX DX BX BP SI DI
},
},
},
{
name: "SUBLloadidx4",
auxType: auxSymOff,
argLen: 4,
resultInArg0: true,
clobberFlags: true,
faultOnNilArg1: true,
symEffect: SymRead,
asm: x86.ASUBL,
reg: regInfo{
inputs: []inputInfo{
{0, 239}, // AX CX DX BX BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{1, 65791}, // AX CX DX BX SP BP SI DI SB
},
outputs: []outputInfo{
{0, 239}, // AX CX DX BX BP SI DI
},
},
},
{
name: "MULLloadidx4",
auxType: auxSymOff,
argLen: 4,
resultInArg0: true,
clobberFlags: true,
faultOnNilArg1: true,
symEffect: SymRead,
asm: x86.AIMULL,
reg: regInfo{
inputs: []inputInfo{
{0, 239}, // AX CX DX BX BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{1, 65791}, // AX CX DX BX SP BP SI DI SB
},
outputs: []outputInfo{
{0, 239}, // AX CX DX BX BP SI DI
},
},
},
{
name: "ANDLloadidx4",
auxType: auxSymOff,
argLen: 4,
resultInArg0: true,
clobberFlags: true,
faultOnNilArg1: true,
symEffect: SymRead,
asm: x86.AANDL,
reg: regInfo{
inputs: []inputInfo{
{0, 239}, // AX CX DX BX BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{1, 65791}, // AX CX DX BX SP BP SI DI SB
},
outputs: []outputInfo{
{0, 239}, // AX CX DX BX BP SI DI
},
},
},
{
name: "ORLloadidx4",
auxType: auxSymOff,
argLen: 4,
resultInArg0: true,
clobberFlags: true,
faultOnNilArg1: true,
symEffect: SymRead,
asm: x86.AORL,
reg: regInfo{
inputs: []inputInfo{
{0, 239}, // AX CX DX BX BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{1, 65791}, // AX CX DX BX SP BP SI DI SB
},
outputs: []outputInfo{
{0, 239}, // AX CX DX BX BP SI DI
},
},
},
{
name: "XORLloadidx4",
auxType: auxSymOff,
argLen: 4,
resultInArg0: true,
clobberFlags: true,
faultOnNilArg1: true,
symEffect: SymRead,
asm: x86.AXORL,
reg: regInfo{
inputs: []inputInfo{
{0, 239}, // AX CX DX BX BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{1, 65791}, // AX CX DX BX SP BP SI DI SB
},
outputs: []outputInfo{
{0, 239}, // AX CX DX BX BP SI DI
},
},
},
{ {
name: "NEGL", name: "NEGL",
argLen: 1, argLen: 1,
@ -4243,6 +4418,16 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "SETO",
argLen: 1,
asm: x86.ASETOS,
reg: regInfo{
outputs: []outputInfo{
{0, 239}, // AX CX DX BX BP SI DI
},
},
},
{ {
name: "SETEQF", name: "SETEQF",
argLen: 1, argLen: 1,
@ -4743,6 +4928,86 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "ADDLmodifyidx4",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.AADDL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{
name: "SUBLmodifyidx4",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.ASUBL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{
name: "ANDLmodifyidx4",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.AANDL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{
name: "ORLmodifyidx4",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.AORL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{
name: "XORLmodifyidx4",
auxType: auxSymOff,
argLen: 4,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.AXORL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{2, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{ {
name: "ADDLconstmodify", name: "ADDLconstmodify",
auxType: auxSymValAndOff, auxType: auxSymValAndOff,
@ -4799,6 +5064,66 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "ADDLconstmodifyidx4",
auxType: auxSymValAndOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.AADDL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{
name: "ANDLconstmodifyidx4",
auxType: auxSymValAndOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.AANDL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{
name: "ORLconstmodifyidx4",
auxType: auxSymValAndOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.AORL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{
name: "XORLconstmodifyidx4",
auxType: auxSymValAndOff,
argLen: 3,
clobberFlags: true,
faultOnNilArg0: true,
symEffect: SymRead | SymWrite,
asm: x86.AXORL,
reg: regInfo{
inputs: []inputInfo{
{1, 255}, // AX CX DX BX SP BP SI DI
{0, 65791}, // AX CX DX BX SP BP SI DI SB
},
},
},
{ {
name: "MOVBloadidx1", name: "MOVBloadidx1",
auxType: auxSymOff, auxType: auxSymOff,
@ -5996,6 +6321,42 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "MULLU",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: x86.AMULL,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
clobbers: 4, // DX
outputs: []outputInfo{
{1, 0},
{0, 1}, // AX
},
},
},
{
name: "MULQU",
argLen: 2,
commutative: true,
clobberFlags: true,
asm: x86.AMULQ,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
clobbers: 4, // DX
outputs: []outputInfo{
{1, 0},
{0, 1}, // AX
},
},
},
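A hedged sketch of the register contract encoded in this entry: x86-64 MUL takes one operand implicitly in AX and writes the 128-bit product to DX:AX, which is why DX is clobbered and the low result is pinned to AX:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	ax, src := uint64(1)<<40, uint64(1)<<30
	dx, lo := bits.Mul64(ax, src) // DX:AX = AX * src
	fmt.Println(dx, lo)           // 64 0, i.e. 1<<70 = 64*2^64 + 0
}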
{ {
name: "HMULQ", name: "HMULQ",
argLen: 2, argLen: 2,
@ -6082,6 +6443,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "DIVQ", name: "DIVQ",
auxType: auxBool,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
asm: x86.AIDIVQ, asm: x86.AIDIVQ,
@ -6098,6 +6460,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "DIVL", name: "DIVL",
auxType: auxBool,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
asm: x86.AIDIVL, asm: x86.AIDIVL,
@ -6114,6 +6477,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "DIVW", name: "DIVW",
auxType: auxBool,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
asm: x86.AIDIVW, asm: x86.AIDIVW,
@ -9018,6 +9382,16 @@ var opcodeTable = [...]opInfo{
}, },
}, },
}, },
{
name: "SETO",
argLen: 1,
asm: x86.ASETOS,
reg: regInfo{
outputs: []outputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{ {
name: "SETEQstore", name: "SETEQstore",
auxType: auxSymOff, auxType: auxSymOff,
@ -23078,6 +23452,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "LoweredAtomicStore32", name: "LoweredAtomicStore32",
auxType: auxInt64,
argLen: 3, argLen: 3,
faultOnNilArg0: true, faultOnNilArg0: true,
hasSideEffects: true, hasSideEffects: true,
@ -23090,6 +23465,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "LoweredAtomicStore64", name: "LoweredAtomicStore64",
auxType: auxInt64,
argLen: 3, argLen: 3,
faultOnNilArg0: true, faultOnNilArg0: true,
hasSideEffects: true, hasSideEffects: true,
@ -23102,6 +23478,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "LoweredAtomicLoad32", name: "LoweredAtomicLoad32",
auxType: auxInt64,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
faultOnNilArg0: true, faultOnNilArg0: true,
@ -23116,6 +23493,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "LoweredAtomicLoad64", name: "LoweredAtomicLoad64",
auxType: auxInt64,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
faultOnNilArg0: true, faultOnNilArg0: true,
@ -23130,6 +23508,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "LoweredAtomicLoadPtr", name: "LoweredAtomicLoadPtr",
auxType: auxInt64,
argLen: 2, argLen: 2,
clobberFlags: true, clobberFlags: true,
faultOnNilArg0: true, faultOnNilArg0: true,
@ -23212,6 +23591,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "LoweredAtomicCas64", name: "LoweredAtomicCas64",
auxType: auxInt64,
argLen: 4, argLen: 4,
resultNotInArgs: true, resultNotInArgs: true,
clobberFlags: true, clobberFlags: true,
@ -23230,6 +23610,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "LoweredAtomicCas32", name: "LoweredAtomicCas32",
auxType: auxInt64,
argLen: 4, argLen: 4,
resultNotInArgs: true, resultNotInArgs: true,
clobberFlags: true, clobberFlags: true,
@ -27624,6 +28005,18 @@ var opcodeTable = [...]opInfo{
commutative: true, commutative: true,
generic: true, generic: true,
}, },
{
name: "Mul32uover",
argLen: 2,
commutative: true,
generic: true,
},
{
name: "Mul64uover",
argLen: 2,
commutative: true,
generic: true,
},
{ {
name: "Avg32u", name: "Avg32u",
argLen: 2, argLen: 2,
@ -27646,6 +28039,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "Div16", name: "Div16",
auxType: auxBool,
argLen: 2, argLen: 2,
generic: true, generic: true,
}, },
@ -27656,6 +28050,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "Div32", name: "Div32",
auxType: auxBool,
argLen: 2, argLen: 2,
generic: true, generic: true,
}, },
@ -27666,6 +28061,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "Div64", name: "Div64",
auxType: auxBool,
argLen: 2, argLen: 2,
generic: true, generic: true,
}, },
@ -27691,6 +28087,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "Mod16", name: "Mod16",
auxType: auxBool,
argLen: 2, argLen: 2,
generic: true, generic: true,
}, },
@ -27701,6 +28098,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "Mod32", name: "Mod32",
auxType: auxBool,
argLen: 2, argLen: 2,
generic: true, generic: true,
}, },
@ -27711,6 +28109,7 @@ var opcodeTable = [...]opInfo{
}, },
{ {
name: "Mod64", name: "Mod64",
auxType: auxBool,
argLen: 2, argLen: 2,
generic: true, generic: true,
}, },
@ -29312,6 +29711,11 @@ var opcodeTable = [...]opInfo{
argLen: 2, argLen: 2,
generic: true, generic: true,
}, },
{
name: "AtomicLoadAcq32",
argLen: 2,
generic: true,
},
{ {
name: "AtomicStore32", name: "AtomicStore32",
argLen: 3, argLen: 3,
@ -29330,6 +29734,12 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true, hasSideEffects: true,
generic: true, generic: true,
}, },
{
name: "AtomicStoreRel32",
argLen: 3,
hasSideEffects: true,
generic: true,
},
{ {
name: "AtomicExchange32", name: "AtomicExchange32",
argLen: 3, argLen: 3,
@ -29366,6 +29776,12 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true, hasSideEffects: true,
generic: true, generic: true,
}, },
{
name: "AtomicCompareAndSwapRel32",
argLen: 4,
hasSideEffects: true,
generic: true,
},
{ {
name: "AtomicAnd8", name: "AtomicAnd8",
argLen: 3, argLen: 3,

View file

@ -114,7 +114,7 @@ type posetNode struct {
// given that non-equality is not transitive, the only effect is that a later call // given that non-equality is not transitive, the only effect is that a later call
// to SetEqual for the same values will fail. NonEqual checks whether it is known that // to SetEqual for the same values will fail. NonEqual checks whether it is known that
// the nodes are different, either because SetNonEqual was called before, or because // the nodes are different, either because SetNonEqual was called before, or because
// we know that that they are strictly ordered. // we know that they are strictly ordered.
// //
// It is implemented as a forest of DAGs; in each DAG, if node A dominates B, // It is implemented as a forest of DAGs; in each DAG, if node A dominates B,
// it means that A<B. Equality is represented by mapping two SSA values to the same // it means that A<B. Equality is represented by mapping two SSA values to the same

View file

@ -1076,6 +1076,13 @@ func addLocalInductiveFacts(ft *factsTable, b *Block) {
} }
var ctzNonZeroOp = map[Op]Op{OpCtz8: OpCtz8NonZero, OpCtz16: OpCtz16NonZero, OpCtz32: OpCtz32NonZero, OpCtz64: OpCtz64NonZero} var ctzNonZeroOp = map[Op]Op{OpCtz8: OpCtz8NonZero, OpCtz16: OpCtz16NonZero, OpCtz32: OpCtz32NonZero, OpCtz64: OpCtz64NonZero}
var mostNegativeDividend = map[Op]int64{
OpDiv16: -1 << 15,
OpMod16: -1 << 15,
OpDiv32: -1 << 31,
OpMod32: -1 << 31,
OpDiv64: -1 << 63,
OpMod64: -1 << 63}
// simplifyBlock simplifies some constant values in b and evaluates // simplifyBlock simplifies some constant values in b and evaluates
// branches to non-uniquely dominated successors of b. // branches to non-uniquely dominated successors of b.
@ -1147,6 +1154,22 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) {
b.Func.Warnl(v.Pos, "Proved %v bounded", v.Op) b.Func.Warnl(v.Pos, "Proved %v bounded", v.Op)
} }
} }
case OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64:
// On amd64 and 386 fix-up code can be avoided if we know
// the divisor is not -1 or the dividend > MinIntNN.
divr := v.Args[1]
divrLim, divrLimok := ft.limits[divr.ID]
divd := v.Args[0]
divdLim, divdLimok := ft.limits[divd.ID]
if (divrLimok && (divrLim.max < -1 || divrLim.min > -1)) ||
(divdLimok && divdLim.min > mostNegativeDividend[v.Op]) {
v.AuxInt = 1 // see NeedsFixUp in genericOps - v.AuxInt = 0 means we have not proved
// that the divisor is not -1 and the dividend is not the most negative,
// so we need to add fix-up code.
if b.Func.pass.debug > 0 {
b.Func.Warnl(v.Pos, "Proved %v does not need fix-up", v.Op)
}
}
} }
} }
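A standalone sketch of the test performed in that case, with a hypothetical limit struct standing in for the pass's ft.limits entries (the real code also consults the ok flags for whether a limit is known at all):

package main

import "fmt"

type limit struct{ min, max int64 } // stand-in for the prove pass's limit facts

// fix-up code is unneeded when the divisor's known range excludes -1,
// or the dividend's known minimum exceeds the most negative value.
func divNeedsNoFixup(divd, divr limit, mostNegative int64) bool {
	return divr.max < -1 || divr.min > -1 || divd.min > mostNegative
}

func main() {
	fmt.Println(divNeedsNoFixup(limit{0, 100}, limit{1, 8}, -1<<31))       // true: divisor > -1
	fmt.Println(divNeedsNoFixup(limit{-5, 5}, limit{-9, 9}, -1<<31))       // true: dividend > MinInt32
	fmt.Println(divNeedsNoFixup(limit{-1 << 31, 0}, limit{-9, 9}, -1<<31)) // false: fix-up needed
}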

View file

@ -7,7 +7,9 @@ package ssa
import ( import (
"cmd/compile/internal/types" "cmd/compile/internal/types"
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src" "cmd/internal/src"
"encoding/binary"
"fmt" "fmt"
"io" "io"
"math" "math"
@ -449,6 +451,16 @@ func extend32Fto64F(f float32) float64 {
return math.Float64frombits(r) return math.Float64frombits(r)
} }
// NeedsFixUp reports whether the division needs fix-up code.
func NeedsFixUp(v *Value) bool {
return v.AuxInt == 0
}
// i2f is used in rules for converting from an AuxInt to a float.
func i2f(i int64) float64 {
return math.Float64frombits(uint64(i))
}
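A quick note on i2f: it inverts auxFrom64F just below, so a float stored in an AuxInt round-trips exactly through its bit pattern:

package main

import (
	"fmt"
	"math"
)

func auxFrom64F(f float64) int64 { return int64(math.Float64bits(f)) }
func i2f(i int64) float64        { return math.Float64frombits(uint64(i)) }

func main() {
	f := 3.75
	fmt.Println(i2f(auxFrom64F(f)) == f) // true: exact round-trip
}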
// auxFrom64F encodes a float64 value so it can be stored in an AuxInt. // auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
func auxFrom64F(f float64) int64 { func auxFrom64F(f float64) int64 {
return int64(math.Float64bits(f)) return int64(math.Float64bits(f))
@ -1090,3 +1102,45 @@ func needRaceCleanup(sym interface{}, v *Value) bool {
} }
return true return true
} }
// symIsRO reports whether sym is a read-only global.
func symIsRO(sym interface{}) bool {
lsym := sym.(*obj.LSym)
return lsym.Type == objabi.SRODATA && len(lsym.R) == 0
}
// read8 reads one byte from the read-only global sym at offset off.
func read8(sym interface{}, off int64) uint8 {
lsym := sym.(*obj.LSym)
return lsym.P[off]
}
// read16 reads two bytes from the read-only global sym at offset off.
func read16(sym interface{}, off int64, bigEndian bool) uint16 {
lsym := sym.(*obj.LSym)
if bigEndian {
return binary.BigEndian.Uint16(lsym.P[off:])
} else {
return binary.LittleEndian.Uint16(lsym.P[off:])
}
}
// read32 reads four bytes from the read-only global sym at offset off.
func read32(sym interface{}, off int64, bigEndian bool) uint32 {
lsym := sym.(*obj.LSym)
if bigEndian {
return binary.BigEndian.Uint32(lsym.P[off:])
} else {
return binary.LittleEndian.Uint32(lsym.P[off:])
}
}
// read64 reads eight bytes from the read-only global sym at offset off.
func read64(sym interface{}, off int64, bigEndian bool) uint64 {
lsym := sym.(*obj.LSym)
if bigEndian {
return binary.BigEndian.Uint64(lsym.P[off:])
} else {
return binary.LittleEndian.Uint64(lsym.P[off:])
}
}

File diff suppressed because it is too large

View file

@ -248,7 +248,7 @@ func rewriteValueAMD64(v *Value) bool {
case OpAMD64MOVBloadidx1: case OpAMD64MOVBloadidx1:
return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v) return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
case OpAMD64MOVBstore: case OpAMD64MOVBstore:
return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) || rewriteValueAMD64_OpAMD64MOVBstore_30(v)
case OpAMD64MOVBstoreconst: case OpAMD64MOVBstoreconst:
return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v) return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
case OpAMD64MOVBstoreconstidx1: case OpAMD64MOVBstoreconstidx1:
@ -268,7 +268,7 @@ func rewriteValueAMD64(v *Value) bool {
case OpAMD64MOVLi2f: case OpAMD64MOVLi2f:
return rewriteValueAMD64_OpAMD64MOVLi2f_0(v) return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
case OpAMD64MOVLload: case OpAMD64MOVLload:
return rewriteValueAMD64_OpAMD64MOVLload_0(v) return rewriteValueAMD64_OpAMD64MOVLload_0(v) || rewriteValueAMD64_OpAMD64MOVLload_10(v)
case OpAMD64MOVLloadidx1: case OpAMD64MOVLloadidx1:
return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v) return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
case OpAMD64MOVLloadidx4: case OpAMD64MOVLloadidx4:
@ -12446,6 +12446,24 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVBload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVLconst [int64(read8(sym, off))])
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpSB {
break
}
if !(symIsRO(sym)) {
break
}
v.reset(OpAMD64MOVLconst)
v.AuxInt = int64(read8(sym, off))
return true
}
return false return false
} }
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
@ -12953,6 +12971,30 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
// cond: validOff(off)
// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64MOVQconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(validOff(off)) {
break
}
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(int64(int8(c)), off)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
@ -13181,6 +13223,13 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
@ -13372,13 +13421,6 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x) // cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem) // result: (MOVWstore [i-1] {s} p w mem)
@ -13514,6 +13556,141 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
for {
i := v.AuxInt
s := v.Aux
_ = v.Args[2]
p := v.Args[0]
w := v.Args[1]
x := v.Args[2]
if x.Op != OpAMD64MOVBstore {
break
}
if x.AuxInt != i+1 {
break
}
if x.Aux != s {
break
}
_ = x.Args[2]
if p != x.Args[0] {
break
}
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRWconst {
break
}
if x_1.AuxInt != 8 {
break
}
if w != x_1.Args[0] {
break
}
mem := x.Args[2]
if !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = i
v.Aux = s
v.AddArg(p)
v.AddArg(w)
v.AddArg(mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
for {
i := v.AuxInt
s := v.Aux
_ = v.Args[2]
p := v.Args[0]
w := v.Args[1]
x := v.Args[2]
if x.Op != OpAMD64MOVBstore {
break
}
if x.AuxInt != i+1 {
break
}
if x.Aux != s {
break
}
_ = x.Args[2]
if p != x.Args[0] {
break
}
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRLconst {
break
}
if x_1.AuxInt != 8 {
break
}
if w != x_1.Args[0] {
break
}
mem := x.Args[2]
if !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = i
v.Aux = s
v.AddArg(p)
v.AddArg(w)
v.AddArg(mem)
return true
}
// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)
for {
i := v.AuxInt
s := v.Aux
_ = v.Args[2]
p := v.Args[0]
w := v.Args[1]
x := v.Args[2]
if x.Op != OpAMD64MOVBstore {
break
}
if x.AuxInt != i+1 {
break
}
if x.Aux != s {
break
}
_ = x.Args[2]
if p != x.Args[0] {
break
}
x_1 := x.Args[1]
if x_1.Op != OpAMD64SHRQconst {
break
}
if x_1.AuxInt != 8 {
break
}
if w != x_1.Args[0] {
break
}
mem := x.Args[2]
if !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstore)
v.AuxInt = i
v.Aux = s
v.AddArg(p)
v.AddArg(w)
v.AddArg(mem)
return true
}
// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x) // cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem) // result: (MOVWstore [i-1] {s} p w0 mem)
@ -13681,6 +13858,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool {
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
@ -13868,6 +14048,37 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
for {
a := v.AuxInt
s := v.Aux
_ = v.Args[1]
p := v.Args[0]
x := v.Args[1]
if x.Op != OpAMD64MOVBstoreconst {
break
}
c := x.AuxInt
if x.Aux != s {
break
}
_ = x.Args[1]
if p != x.Args[0] {
break
}
mem := x.Args[1]
if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
v.Aux = s
v.AddArg(p)
v.AddArg(mem)
return true
}
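A worked check of the merged constant in the rule above: byte stores of 0x12 at offset 0 and 0x34 at offset 1 combine, via a&0xff | c<<8, into the 16-bit value 0x3412, which lays out little-endian exactly as the two byte stores would have:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	a, c := int64(0x12), int64(0x34)
	merged := uint16(a&0xff | c<<8) // 0x3412

	buf := make([]byte, 2)
	binary.LittleEndian.PutUint16(buf, merged)
	fmt.Printf("%#x -> % x\n", merged, buf) // 0x3412 -> 12 34
}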
// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
@ -15449,6 +15660,31 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
} }
return false return false
} }
func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
// match: (MOVLload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVQconst [int64(read32(sym, off, config.BigEndian))])
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpSB {
break
}
if !(symIsRO(sym)) {
break
}
v.reset(OpAMD64MOVQconst)
v.AuxInt = int64(read32(sym, off, config.BigEndian))
return true
}
return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
// cond: // cond:
@ -15957,6 +16193,30 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
// cond: validOff(off)
// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64MOVQconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(validOff(off)) {
break
}
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(int64(int32(c)), off)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
@ -16102,6 +16362,13 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
// cond: x.Uses == 1 && clobber(x) // cond: x.Uses == 1 && clobber(x)
// result: (MOVQstore [i-4] {s} p w mem) // result: (MOVQstore [i-4] {s} p w mem)
@ -16147,13 +16414,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
// cond: x.Uses == 1 && clobber(x) // cond: x.Uses == 1 && clobber(x)
// result: (MOVQstore [i-4] {s} p w0 mem) // result: (MOVQstore [i-4] {s} p w0 mem)
@ -16519,6 +16779,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
// match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem) // match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
// result: (ADDLmodify [off] {sym} ptr x mem) // result: (ADDLmodify [off] {sym} ptr x mem)
@ -16562,9 +16825,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
// result: (SUBLmodify [off] {sym} ptr x mem) // result: (SUBLmodify [off] {sym} ptr x mem)
@ -16952,6 +17212,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
// match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem) // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
// result: (BTSLmodify [off] {sym} ptr x mem) // result: (BTSLmodify [off] {sym} ptr x mem)
@ -16995,9 +17258,6 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
@ -17462,6 +17722,40 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
for {
a := v.AuxInt
s := v.Aux
_ = v.Args[1]
p := v.Args[0]
x := v.Args[1]
if x.Op != OpAMD64MOVLstoreconst {
break
}
c := x.AuxInt
if x.Aux != s {
break
}
_ = x.Args[1]
if p != x.Args[0] {
break
}
mem := x.Args[1]
if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVQstore)
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
@ -18481,6 +18775,10 @@ func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool {
return false return false
} }
func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x // result: x
@ -18713,6 +19011,24 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
v.AddArg(val) v.AddArg(val)
return true return true
} }
// match: (MOVQload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVQconst [int64(read64(sym, off, config.BigEndian))])
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpSB {
break
}
if !(symIsRO(sym)) {
break
}
v.reset(OpAMD64MOVQconst)
v.AuxInt = int64(read64(sym, off, config.BigEndian))
return true
}
return false return false
} }
func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
@ -22529,6 +22845,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
return false return false
} }
func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
b := v.Block
_ = b
config := b.Func.Config
_ = config
// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVWQZX x) // result: (MOVWQZX x)
@ -22733,6 +23053,24 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVLconst [int64(read16(sym, off, config.BigEndian))])
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpSB {
break
}
if !(symIsRO(sym)) {
break
}
v.reset(OpAMD64MOVLconst)
v.AuxInt = int64(read16(sym, off, config.BigEndian))
return true
}
return false return false
} }
func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
@ -23114,6 +23452,30 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
// cond: validOff(off)
// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64MOVQconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(validOff(off)) {
break
}
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
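
The rule above narrows a 64-bit constant stored through a 16-bit store: only int16(c) survives, and the truncated value plus the store offset are packed into a single AuxInt. A sketch of the ValAndOff encoding these rules assume (a hypothetical mirror of makeValAndOff/Val/Off: value in the high 32 bits, offset in the low 32):

package sketch

// makeValAndOffSketch packs a 32-bit value and a 32-bit offset into a
// single int64 AuxInt, value in the high half, offset in the low half.
func makeValAndOffSketch(val, off int64) int64 {
	return val<<32 | int64(uint32(off))
}

func valOf(vo int64) int64 { return vo >> 32 }         // arithmetic shift sign-extends the value
func offOf(vo int64) int64 { return int64(int32(vo)) } // sign-extend the low 32 bits
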
// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
@@ -23274,6 +23636,13 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVLstore [i-2] {s} p w mem)
@@ -23319,13 +23688,6 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
return false
}
func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVLstore [i-2] {s} p w0 mem)
@@ -23708,6 +24070,37 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool {
v.AddArg(mem)
return true
}
// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
for {
a := v.AuxInt
s := v.Aux
_ = v.Args[1]
p := v.Args[0]
x := v.Args[1]
if x.Op != OpAMD64MOVWstoreconst {
break
}
c := x.AuxInt
if x.Aux != s {
break
}
_ = x.Args[1]
if p != x.Args[0] {
break
}
mem := x.Args[1]
if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
break
}
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
v.AddArg(p)
v.AddArg(mem)
return true
}
// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
@@ -59398,15 +59791,17 @@ func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (Div16 [a] x y)
// cond:
// result: (Select0 (DIVW [a] x y))
for {
a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -59438,15 +59833,17 @@ func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (Div32 [a] x y)
// cond:
// result: (Select0 (DIVL [a] x y))
for {
a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -59492,15 +59889,17 @@ func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (Div64 [a] x y)
// cond:
// result: (Select0 (DIVQ [a] x y))
for {
a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -61578,15 +61977,17 @@ func rewriteValueAMD64_OpMod16_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (Mod16 [a] x y)
// cond:
// result: (Select1 (DIVW [a] x y))
for {
a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -61618,15 +62019,17 @@ func rewriteValueAMD64_OpMod32_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (Mod32 [a] x y)
// cond:
// result: (Select1 (DIVL [a] x y))
for {
a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -61658,15 +62061,17 @@ func rewriteValueAMD64_OpMod64_0(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (Mod64 [a] x y)
// cond:
// result: (Select1 (DIVQ [a] x y))
for {
a := v.AuxInt
_ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
v0.AuxInt = a
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
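
The only change in the six Div/Mod lowerings above is that the AuxInt written [a] on the generic op is now carried onto the machine DIVW/DIVL/DIVQ value instead of being dropped. Reduced to a skeleton, the shared propagation pattern looks like this (a hypothetical helper written against the ssa package's Value/Op/types.Type, not a real rule):

// lowerDivMod sketches the shared shape of the rewrites above: replace
// the generic op with Select0/Select1 of the machine divide, copying
// AuxInt and both arguments through unchanged.
func lowerDivMod(v *Value, sel, machineOp Op, tupleType *types.Type) bool {
	a := v.AuxInt
	x, y := v.Args[0], v.Args[1]
	v.reset(sel)
	v0 := v.Block.NewValue0(v.Pos, machineOp, tupleType)
	v0.AuxInt = a // the new part: AuxInt propagated to the machine op
	v0.AddArg(x)
	v0.AddArg(y)
	v.AddArg(v0)
	return true
}
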
@@ -64393,6 +64798,46 @@ func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (Select0 (Mul64uover x y))
// cond:
// result: (Select0 <typ.UInt64> (MULQU x y))
for {
v_0 := v.Args[0]
if v_0.Op != OpMul64uover {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpSelect0)
v.Type = typ.UInt64
v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
// match: (Select0 (Mul32uover x y))
// cond:
// result: (Select0 <typ.UInt32> (MULLU x y))
for {
v_0 := v.Args[0]
if v_0.Op != OpMul32uover {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpSelect0)
v.Type = typ.UInt32
v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
// match: (Select0 <t> (AddTupleFirst32 val tuple))
// cond:
// result: (ADDL val (Select0 <t> tuple))
@@ -64434,6 +64879,50 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
// match: (Select1 (Mul64uover x y))
// cond:
// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
for {
v_0 := v.Args[0]
if v_0.Op != OpMul64uover {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpAMD64SETO)
v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (Select1 (Mul32uover x y))
// cond:
// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
for {
v_0 := v.Args[0]
if v_0.Op != OpMul32uover {
break
}
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpAMD64SETO)
v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
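
Taken together, the Select0 and Select1 rules above lower an overflow-checked unsigned multiply to a single MULQU/MULLU: Select0 yields the low half of the product, and Select1 becomes SETO on the flags result, since x86's unsigned multiply sets OF exactly when the high half of the result is nonzero. A behavioral sketch of the semantics (mul64uover is a made-up name, and math/bits stands in for the SSA op):

package sketch

import "math/bits"

// mul64uover mirrors what (Select0 (Mul64uover x y)) and
// (Select1 (Mul64uover x y)) compute after lowering: the low 64 bits
// of the product, and whether the multiplication overflowed
// (OF set, i.e. the high half is nonzero).
func mul64uover(x, y uint64) (lo uint64, overflow bool) {
	hi, lo := bits.Mul64(x, y)
	return lo, hi != 0
}
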
// match: (Select1 (AddTupleFirst32 _ tuple))
// cond:
// result: (Select1 tuple)
@@ -66598,6 +67087,20 @@ func rewriteBlockAMD64(b *Block) bool {
b.Aux = nil
return true
}
// match: (If (SETO cmp) yes no)
// cond:
// result: (OS cmp yes no)
for {
v := b.Control
if v.Op != OpAMD64SETO {
break
}
cmp := v.Args[0]
b.Kind = BlockAMD64OS
b.SetControl(cmp)
b.Aux = nil
return true
}
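
The block rewrite above lets a branch on that overflow bit skip materializing the SETO result into a register: If (SETO cmp) becomes a direct jump on the overflow flag (the OS block kind). In source terms the pattern it benefits looks roughly like this (illustrative only; mulChecked is a made-up name):

package sketch

import "math/bits"

// mulChecked shows the shape of code that benefits: the `if` on the
// overflow result compiles to SETO + If, which the rule fuses into a
// single jump-on-overflow branch.
func mulChecked(x, y uint64) uint64 {
	hi, lo := bits.Mul64(x, y)
	if hi != 0 { // branch lowered to an OS-conditional jump
		panic("multiply overflow")
	}
	return lo
}
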
// match: (If (SETGF cmp) yes no)
// cond:
// result: (UGT cmp yes no)
@@ -67355,6 +67858,58 @@ func rewriteBlockAMD64(b *Block) bool {
b.Aux = nil
return true
}
// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
// cond:
// result: (OS cmp yes no)
for {
v := b.Control
if v.Op != OpAMD64TESTB {
break
}
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpAMD64SETO {
break
}
cmp := v_0.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64SETO {
break
}
if cmp != v_1.Args[0] {
break
}
b.Kind = BlockAMD64OS
b.SetControl(cmp)
b.Aux = nil
return true
}
// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
// cond:
// result: (OS cmp yes no)
for {
v := b.Control
if v.Op != OpAMD64TESTB {
break
}
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpAMD64SETO {
break
}
cmp := v_0.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64SETO {
break
}
if cmp != v_1.Args[0] {
break
}
b.Kind = BlockAMD64OS
b.SetControl(cmp)
b.Aux = nil
return true
}
// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
// cond: !config.nacl
// result: (ULT (BTL x y))

View file

@@ -6883,6 +6883,24 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool {
v.AddArg(mem)
return true
}
// match: (MOVBUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVWconst [int64(read8(sym, off))])
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpSB {
break
}
if !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
v.AuxInt = int64(read8(sym, off))
return true
}
return false
}
func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool {
@@ -7953,6 +7971,24 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool {
v.AddArg(mem)
return true
}
// match: (MOVHUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVWconst [int64(read16(sym, off, config.BigEndian))])
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpSB {
break
}
if !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
v.AuxInt = int64(read16(sym, off, config.BigEndian))
return true
}
return false
}
func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool {
@@ -8797,6 +8833,24 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool {
v.AddArg(mem)
return true
}
// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVWconst [int64(int32(read32(sym, off, config.BigEndian)))])
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpSB {
break
}
if !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
v.AuxInt = int64(int32(read32(sym, off, config.BigEndian)))
return true
}
return false
}
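
The three ARM rules above fold loads from read-only symbols just like the AMD64 ones, with one wrinkle visible in the MOVWload case: the 32-bit word goes through int64(int32(...)) so the MOVWconst AuxInt holds a properly sign-extended value. A sketch of that conversion (a hypothetical helper over a raw byte slice, not the compiler's read32):

package sketch

import "encoding/binary"

// read32Signed reads a 32-bit word at off and sign-extends it to
// int64, matching the int64(int32(read32(...))) conversion in the
// MOVWload rule above.
func read32Signed(data []byte, off int64, bigEndian bool) int64 {
	var u uint32
	if bigEndian {
		u = binary.BigEndian.Uint32(data[off : off+4])
	} else {
		u = binary.LittleEndian.Uint32(data[off : off+4])
	}
	return int64(int32(u))
}
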
func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool {

Some files were not shown because too many files have changed in this diff.