diff --git a/.gitignore b/.gitignore
index c6512e64a4e..344b31f7ac1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,6 +30,7 @@ _testmain.go
/misc/cgo/testso/main
/pkg/
/src/*.*/
+/src/_artifacts/
/src/cmd/cgo/zdefaultcc.go
/src/cmd/dist/dist
/src/cmd/go/internal/cfg/zdefaultcc.go
diff --git a/api/next/68021.txt b/api/next/68021.txt
new file mode 100644
index 00000000000..46156e06654
--- /dev/null
+++ b/api/next/68021.txt
@@ -0,0 +1,13 @@
+pkg go/ast, func ParseDirective(token.Pos, string) (Directive, bool) #68021
+pkg go/ast, method (*Directive) End() token.Pos #68021
+pkg go/ast, method (*Directive) ParseArgs() ([]DirectiveArg, error) #68021
+pkg go/ast, method (*Directive) Pos() token.Pos #68021
+pkg go/ast, type Directive struct #68021
+pkg go/ast, type Directive struct, Args string #68021
+pkg go/ast, type Directive struct, ArgsPos token.Pos #68021
+pkg go/ast, type Directive struct, Name string #68021
+pkg go/ast, type Directive struct, Slash token.Pos #68021
+pkg go/ast, type Directive struct, Tool string #68021
+pkg go/ast, type DirectiveArg struct #68021
+pkg go/ast, type DirectiveArg struct, Arg string #68021
+pkg go/ast, type DirectiveArg struct, Pos token.Pos #68021
diff --git a/api/next/71287.txt b/api/next/71287.txt
new file mode 100644
index 00000000000..c1e09a1f523
--- /dev/null
+++ b/api/next/71287.txt
@@ -0,0 +1,4 @@
+pkg testing, method (*B) ArtifactDir() string #71287
+pkg testing, method (*F) ArtifactDir() string #71287
+pkg testing, method (*T) ArtifactDir() string #71287
+pkg testing, type TB interface, ArtifactDir() string #71287
diff --git a/api/next/73794.txt b/api/next/73794.txt
new file mode 100644
index 00000000000..4018c149ecb
--- /dev/null
+++ b/api/next/73794.txt
@@ -0,0 +1 @@
+pkg bytes, method (*Buffer) Peek(int) ([]uint8, error) #73794
diff --git a/doc/go_spec.html b/doc/go_spec.html
index 92afe1cee0b..a2f22e31dbf 100644
--- a/doc/go_spec.html
+++ b/doc/go_spec.html
@@ -1,6 +1,6 @@
@@ -2686,22 +2686,6 @@ of a method declaration associated
with a generic type.
-
-Within a type parameter list of a generic type T, a type constraint
-may not (directly, or indirectly through the type parameter list of another
-generic type) refer to T.
-
-
-
-type T1[P T1[P]] … // illegal: T1 refers to itself
-type T2[P interface{ T2[int] }] … // illegal: T2 refers to itself
-type T3[P interface{ m(T3[int])}] … // illegal: T3 refers to itself
-type T4[P T5[P]] … // illegal: T4 refers to T5 and
-type T5[P T4[P]] … // T5 refers to T4
-
-type T6[P int] struct{ f *T6[P] } // ok: reference to T6 is not in type parameter list
-
-
Type constraints
@@ -3173,7 +3157,7 @@ Element = Expression | LiteralValue .
Unless the LiteralType is a type parameter,
-its underlying type
+its underlying type
must be a struct, array, slice, or map type
(the syntax enforces this constraint except when the type is given
as a TypeName).
@@ -4873,7 +4857,7 @@ For instance, x / y * z is the same as (x / y) * z.
x <= f() // x <= f()
^a >> b // (^a) >> b
f() || g() // f() || g()
-x == y+1 && <-chanInt > 0 // (x == (y+1)) && ((<-chanInt) > 0)
+x == y+1 && <-chanInt > 0 // (x == (y+1)) && ((<-chanInt) > 0)
@@ -6635,7 +6619,7 @@ iteration's variable at that moment.
var prints []func()
-for i := 0; i < 5; i++ {
+for i := 0; i < 5; i++ {
prints = append(prints, func() { println(i) })
i++
}
@@ -6772,7 +6756,7 @@ if the iteration variable is preexisting, the type of the iteration values is th
variable, which must be of integer type.
Otherwise, if the iteration variable is declared by the "range" clause or is absent,
the type of the iteration values is the default type for n.
-If n <= 0, the loop does not run any iterations.
+If n <= 0, the loop does not run any iterations.
@@ -7383,8 +7367,8 @@ The values x are passed to a parameter of type ...E
where E is the element type of S
and the respective parameter
passing rules apply.
-As a special case, append also accepts a first argument assignable
-to type []byte with a second argument of string type followed by
+As a special case, append also accepts a slice whose type is assignable to
+type []byte with a second argument of string type followed by
....
This form appends the bytes of the string.
@@ -7799,7 +7783,7 @@ compared lexically byte-wise:
-min(x, y) == if x <= y then x else y
+min(x, y) == if x <= y then x else y
min(x, y, z) == min(min(x, y), z)
diff --git a/doc/godebug.md b/doc/godebug.md
index aaa0f9dd55e..d9ae462b980 100644
--- a/doc/godebug.md
+++ b/doc/godebug.md
@@ -153,6 +153,21 @@ for example,
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
+### Go 1.26
+
+Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
+of cookies that net/http will accept when parsing HTTP headers. If the number of
+cookies in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
+will fail early. The default value is `httpcookiemaxnum=3000`. Setting
+`httpcookiemaxnum=0` will allow the cookie parsing to accept an indefinite
+number of cookies. To avoid denial of service attacks, this setting and default
+was backported to Go 1.25.2 and Go 1.24.8.
+
+Go 1.26 added a new `urlstrictcolons` setting that controls whether `net/url.Parse`
+allows malformed hostnames containing colons outside of a bracketed IPv6 address.
+The default `urlstrictcolons=1` rejects URLs such as `http://localhost:1:2` or `http://::1/`.
+Colons are permitted as part of a bracketed IPv6 address, such as `http://[::1]/`.
+
### Go 1.25
Go 1.25 added a new `decoratemappings` setting that controls whether the Go
diff --git a/doc/next/2-language.md b/doc/next/2-language.md
index ded7becf014..71da62f59e5 100644
--- a/doc/next/2-language.md
+++ b/doc/next/2-language.md
@@ -19,10 +19,14 @@ type Person struct {
Age *int `json:"age"` // age if known; nil otherwise
}
-func personJSON(name string, age int) ([]byte, error) {
+func personJSON(name string, born time.Time) ([]byte, error) {
return json.Marshal(Person{
Name: name,
- Age: new(age),
+ Age: new(yearsSince(born)),
})
}
+
+func yearsSince(t time.Time) int {
+ return int(time.Since(t).Hours() / (365.25 * 24)) // approximately
+}
```
diff --git a/doc/next/3-tools.md b/doc/next/3-tools.md
index 9459a5490e7..c0a4601c0b9 100644
--- a/doc/next/3-tools.md
+++ b/doc/next/3-tools.md
@@ -7,5 +7,15 @@
a replacement for `go tool doc`: it takes the same flags and arguments and
has the same behavior.
+
+The `go fix` command, following the pattern of `go vet` in Go 1.10,
+now uses the Go analysis framework (`golang.org/x/tools/go/analysis`).
+This means the same analyzers that provide diagnostics in `go vet`
+can be used to suggest and apply fixes in `go fix`.
+The `go fix` command's historical fixers, all of which were obsolete,
+have been removed and replaced by a suite of new analyzers that
+offer fixes to use newer features of the language and library.
+
+
### Cgo {#cgo}
diff --git a/doc/next/5-toolchain.md b/doc/next/5-toolchain.md
index cc32f30a521..b5893288e5c 100644
--- a/doc/next/5-toolchain.md
+++ b/doc/next/5-toolchain.md
@@ -4,6 +4,10 @@
## Linker {#linker}
+On 64-bit ARM-based Windows (the `windows/arm64` port), the linker now supports internal
+linking mode of cgo programs, which can be requested with the
+`-ldflags=-linkmode=internal` flag.
+
## Bootstrap {#bootstrap}
diff --git a/doc/next/6-stdlib/99-minor/bytes/73794.md b/doc/next/6-stdlib/99-minor/bytes/73794.md
new file mode 100644
index 00000000000..a44dfc10e69
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/bytes/73794.md
@@ -0,0 +1,2 @@
+The new [Buffer.Peek] method returns the next n bytes from the buffer without
+advancing it.
diff --git a/doc/next/6-stdlib/99-minor/go/ast/68021.md b/doc/next/6-stdlib/99-minor/go/ast/68021.md
new file mode 100644
index 00000000000..0ff1a0b11e8
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/go/ast/68021.md
@@ -0,0 +1,4 @@
+The new [ParseDirective] function parses [directive
+comments](/doc/comment#Syntax), which are comments such as `//go:generate`.
+Source code tools can support their own directive comments and this new API
+should help them implement the conventional syntax.
diff --git a/doc/next/6-stdlib/99-minor/net/url/31024.md b/doc/next/6-stdlib/99-minor/net/url/31024.md
new file mode 100644
index 00000000000..11ed31e87c5
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/net/url/31024.md
@@ -0,0 +1,4 @@
+[Parse] now rejects malformed URLs containing colons in the host subcomponent,
+such as `http://::1/` or `http://localhost:80:80/`.
+URLs containing bracketed IPv6 addresses, such as `http://[::1]/`, are still accepted.
+The new GODEBUG=urlstrictcolons=0 setting restores the old behavior.
diff --git a/doc/next/6-stdlib/99-minor/testing/71287.md b/doc/next/6-stdlib/99-minor/testing/71287.md
new file mode 100644
index 00000000000..82cac638101
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/testing/71287.md
@@ -0,0 +1,18 @@
+The new methods [T.ArtifactDir], [B.ArtifactDir], and [F.ArtifactDir]
+return a directory in which to write test output files (artifacts).
+
+When the `-artifacts` flag is provided to `go test`,
+this directory will be located under the output directory
+(specified with `-outputdir`, or the current directory by default).
+Otherwise, artifacts are stored in a temporary directory
+which is removed after the test completes.
+
+The first call to `ArtifactDir` when `-artifacts` is provided
+writes the location of the directory to the test log.
+
+For example, in a test named `TestArtifacts`,
+`t.ArtifactDir()` emits:
+
+```
+=== ARTIFACTS Test /path/to/artifact/dir
+```
diff --git a/lib/hg/goreposum.py b/lib/hg/goreposum.py
new file mode 100644
index 00000000000..1a7d7a44466
--- /dev/null
+++ b/lib/hg/goreposum.py
@@ -0,0 +1,64 @@
+# Copyright 2025 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Mercurial extension to add a 'goreposum' command that
+# computes a hash of a remote repo's tag state.
+# Tag definitions can come from the .hgtags file stored in
+# any head of any branch, and the server protocol does not
+# expose the tags directly. However, the protocol does expose
+# the hashes of all the branch heads, so we can use a hash of
+# all those branch names and heads as a conservative snapshot
+# of the entire remote repo state, and use that as the tag sum.
+# Any change on the server then invalidates the tag sum,
+# even if it didn't have anything to do with tags, but at least
+# we will avoid re-cloning a server when there have been no
+# changes at all.
+#
+# Separately, this extension also adds a 'golookup' command that
+# returns the hash of a specific reference, like 'default' or a tag.
+# And golookup of a hash confirms that it still exists on the server.
+# We can use that to revalidate that specific versions still exist and
+# have the same meaning they did the last time we checked.
+#
+# Usage:
+#
+# hg --config "extensions.goreposum=$GOROOT/lib/hg/goreposum.py" goreposum REPOURL
+
+import base64, hashlib, sys
+from mercurial import registrar, ui, hg, node
+from mercurial.i18n import _
+cmdtable = {}
+command = registrar.command(cmdtable)
+@command(b'goreposum', [], _('url'), norepo=True)
+def goreposum(ui, url):
+ """
+ goreposum computes a checksum of all the named state in the remote repo.
+ It hashes together all the branch names and hashes
+ and then all the bookmark names and hashes.
+ Tags are stored in .hgtags files in any of the branches,
+ so the branch metadata includes the tags as well.
+ """
+ h = hashlib.sha256()
+ peer = hg.peer(ui, {}, url)
+ for name, revs in peer.branchmap().items():
+ h.update(name)
+ for r in revs:
+ h.update(b' ')
+ h.update(r)
+ h.update(b'\n')
+ if (b'bookmarks' in peer.listkeys(b'namespaces')):
+ for name, rev in peer.listkeys(b'bookmarks').items():
+ h.update(name)
+ h.update(b'=')
+ h.update(rev)
+ h.update(b'\n')
+ print('r1:'+base64.standard_b64encode(h.digest()).decode('utf-8'))
+
+@command(b'golookup', [], _('url rev'), norepo=True)
+def golookup(ui, url, rev):
+ """
+ golookup looks up a single identifier in the repo,
+ printing its hash.
+ """
+ print(node.hex(hg.peer(ui, {}, url).lookup(rev)).decode('utf-8'))
diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go
index 7b3945ff153..ad31bbb64aa 100644
--- a/src/archive/tar/common.go
+++ b/src/archive/tar/common.go
@@ -39,6 +39,7 @@ var (
errMissData = errors.New("archive/tar: sparse file references non-existent data")
errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
+ errSparseTooLong = errors.New("archive/tar: sparse map too long")
)
type headerError []string
diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
index 8483fb52a28..16ac2f5b17c 100644
--- a/src/archive/tar/reader.go
+++ b/src/archive/tar/reader.go
@@ -531,12 +531,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
cntNewline int64
buf bytes.Buffer
blk block
+ totalSize int
)
// feedTokens copies data in blocks from r into buf until there are
// at least cnt newlines in buf. It will not read more blocks than needed.
feedTokens := func(n int64) error {
for cntNewline < n {
+ totalSize += len(blk)
+ if totalSize > maxSpecialFileSize {
+ return errSparseTooLong
+ }
if _, err := mustReadFull(r, blk[:]); err != nil {
return err
}
@@ -569,8 +574,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
}
// Parse for all member entries.
- // numEntries is trusted after this since a potential attacker must have
- // committed resources proportional to what this library used.
+ // numEntries is trusted after this since feedTokens limits the number of
+ // tokens based on maxSpecialFileSize.
if err := feedTokens(2 * numEntries); err != nil {
return nil, err
}
diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go
index 99340a30471..fca53dae741 100644
--- a/src/archive/tar/reader_test.go
+++ b/src/archive/tar/reader_test.go
@@ -621,6 +621,11 @@ func TestReader(t *testing.T) {
},
Format: FormatPAX,
}},
+ }, {
+ // Small compressed file that uncompresses to
+ // a file with a very large GNU 1.0 sparse map.
+ file: "testdata/gnu-sparse-many-zeros.tar.bz2",
+ err: errSparseTooLong,
}}
for _, v := range vectors {
diff --git a/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2
new file mode 100644
index 00000000000..751d7fd4b68
Binary files /dev/null and b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 differ
diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
index 410b2d037e4..cb8a0c28714 100644
--- a/src/archive/zip/reader_test.go
+++ b/src/archive/zip/reader_test.go
@@ -1213,7 +1213,6 @@ func TestFS(t *testing.T) {
[]string{"a/b/c"},
},
} {
- test := test
t.Run(test.file, func(t *testing.T) {
t.Parallel()
z, err := OpenReader(test.file)
@@ -1247,7 +1246,6 @@ func TestFSWalk(t *testing.T) {
wantErr: true,
},
} {
- test := test
t.Run(test.file, func(t *testing.T) {
t.Parallel()
z, err := OpenReader(test.file)
diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go
index 9684513942d..3eb5b350c38 100644
--- a/src/bytes/buffer.go
+++ b/src/bytes/buffer.go
@@ -77,6 +77,18 @@ func (b *Buffer) String() string {
return string(b.buf[b.off:])
}
+// Peek returns the next n bytes without advancing the buffer.
+// If Peek returns fewer than n bytes, it also returns [io.EOF].
+// The slice is only valid until the next call to a read or write method.
+// The slice aliases the buffer content at least until the next buffer modification,
+// so immediate changes to the slice will affect the result of future reads.
+func (b *Buffer) Peek(n int) ([]byte, error) {
+ if b.Len() < n {
+ return b.buf[b.off:], io.EOF
+ }
+	return b.buf[b.off : b.off+n], nil
+}
+
// empty reports whether the unread portion of the buffer is empty.
func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go
index b46ba1204eb..5f5cc483b03 100644
--- a/src/bytes/buffer_test.go
+++ b/src/bytes/buffer_test.go
@@ -531,6 +531,34 @@ func TestReadString(t *testing.T) {
}
}
+var peekTests = []struct {
+ buffer string
+ n int
+ expected string
+ err error
+}{
+ {"", 0, "", nil},
+ {"aaa", 3, "aaa", nil},
+ {"foobar", 2, "fo", nil},
+ {"a", 2, "a", io.EOF},
+}
+
+func TestPeek(t *testing.T) {
+ for _, test := range peekTests {
+ buf := NewBufferString(test.buffer)
+ bytes, err := buf.Peek(test.n)
+ if string(bytes) != test.expected {
+ t.Errorf("expected %q, got %q", test.expected, bytes)
+ }
+ if err != test.err {
+ t.Errorf("expected error %v, got %v", test.err, err)
+ }
+ if buf.Len() != len(test.buffer) {
+ t.Errorf("bad length after peek: %d, want %d", buf.Len(), len(test.buffer))
+ }
+ }
+}
+
func BenchmarkReadString(b *testing.B) {
const n = 32 << 10
diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index f18915c879e..9547ede312f 100644
--- a/src/bytes/bytes_test.go
+++ b/src/bytes/bytes_test.go
@@ -1224,7 +1224,7 @@ func TestMap(t *testing.T) {
// Run a couple of awful growth/shrinkage tests
a := tenRunes('a')
- // 1. Grow. This triggers two reallocations in Map.
+ // 1. Grow. This triggers two reallocations in Map.
maxRune := func(r rune) rune { return unicode.MaxRune }
m := Map(maxRune, []byte(a))
expect := tenRunes(unicode.MaxRune)
diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go
index 8481a8f378f..fb9e7851110 100644
--- a/src/cmd/asm/internal/arch/arch.go
+++ b/src/cmd/asm/internal/arch/arch.go
@@ -92,7 +92,8 @@ func jumpX86(word string) bool {
func jumpRISCV(word string) bool {
switch word {
case "BEQ", "BEQZ", "BGE", "BGEU", "BGEZ", "BGT", "BGTU", "BGTZ", "BLE", "BLEU", "BLEZ",
- "BLT", "BLTU", "BLTZ", "BNE", "BNEZ", "CALL", "JAL", "JALR", "JMP":
+ "BLT", "BLTU", "BLTZ", "BNE", "BNEZ", "CALL", "CBEQZ", "CBNEZ", "CJ", "CJALR", "CJR",
+ "JAL", "JALR", "JMP":
return true
}
return false
diff --git a/src/cmd/asm/internal/arch/arm64.go b/src/cmd/asm/internal/arch/arm64.go
index 87ccb8c0409..d562e5907d6 100644
--- a/src/cmd/asm/internal/arch/arm64.go
+++ b/src/cmd/asm/internal/arch/arm64.go
@@ -195,149 +195,6 @@ func ARM64RegisterShift(reg, op, count int16) (int64, error) {
return int64(reg&31)<<16 | int64(op)<<22 | int64(uint16(count)), nil
}
-// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement.
-func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error {
- Rnum := (reg & 31) + int16(num<<5)
- if isAmount {
- if num < 0 || num > 7 {
- return errors.New("index shift amount is out of range")
- }
- }
- if reg <= arm64.REG_R31 && reg >= arm64.REG_R0 {
- if !isAmount {
- return errors.New("invalid register extension")
- }
- switch ext {
- case "UXTB":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_UXTB + Rnum
- case "UXTH":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_UXTH + Rnum
- case "UXTW":
- // effective address of memory is a base register value and an offset register value.
- if a.Type == obj.TYPE_MEM {
- a.Index = arm64.REG_UXTW + Rnum
- } else {
- a.Reg = arm64.REG_UXTW + Rnum
- }
- case "UXTX":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_UXTX + Rnum
- case "SXTB":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_SXTB + Rnum
- case "SXTH":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_SXTH + Rnum
- case "SXTW":
- if a.Type == obj.TYPE_MEM {
- a.Index = arm64.REG_SXTW + Rnum
- } else {
- a.Reg = arm64.REG_SXTW + Rnum
- }
- case "SXTX":
- if a.Type == obj.TYPE_MEM {
- a.Index = arm64.REG_SXTX + Rnum
- } else {
- a.Reg = arm64.REG_SXTX + Rnum
- }
- case "LSL":
- a.Index = arm64.REG_LSL + Rnum
- default:
- return errors.New("unsupported general register extension type: " + ext)
-
- }
- } else if reg <= arm64.REG_V31 && reg >= arm64.REG_V0 {
- switch ext {
- case "B8":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5)
- case "B16":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5)
- case "H4":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5)
- case "H8":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5)
- case "S2":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5)
- case "S4":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5)
- case "D1":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5)
- case "D2":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5)
- case "Q1":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5)
- case "B":
- if !isIndex {
- return nil
- }
- a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5)
- a.Index = num
- case "H":
- if !isIndex {
- return nil
- }
- a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5)
- a.Index = num
- case "S":
- if !isIndex {
- return nil
- }
- a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5)
- a.Index = num
- case "D":
- if !isIndex {
- return nil
- }
- a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5)
- a.Index = num
- default:
- return errors.New("unsupported simd register extension type: " + ext)
- }
- } else {
- return errors.New("invalid register and extension combination")
- }
- return nil
-}
-
// ARM64RegisterArrangement constructs an ARM64 vector register arrangement.
func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) {
var curQ, curSize uint16
diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go
index 389307af29e..0f75edf4e5d 100644
--- a/src/cmd/asm/internal/asm/asm.go
+++ b/src/cmd/asm/internal/asm/asm.go
@@ -248,7 +248,7 @@ func (p *Parser) asmData(operands [][]lex.Token) {
case obj.TYPE_CONST:
switch sz {
case 1, 2, 4, 8:
- nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Offset)
+ nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, sz, valueAddr.Offset)
default:
p.errorf("bad int size for DATA argument: %d", sz)
}
@@ -262,10 +262,10 @@ func (p *Parser) asmData(operands [][]lex.Token) {
p.errorf("bad float size for DATA argument: %d", sz)
}
case obj.TYPE_SCONST:
- nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Val.(string))
+ nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, sz, valueAddr.Val.(string))
case obj.TYPE_ADDR:
if sz == p.arch.PtrSize {
- nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Sym, valueAddr.Offset)
+ nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, sz, valueAddr.Sym, valueAddr.Offset)
} else {
p.errorf("bad addr size for DATA argument: %d", sz)
}
diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go
index afaf02815f9..e53263356d1 100644
--- a/src/cmd/asm/internal/asm/endtoend_test.go
+++ b/src/cmd/asm/internal/asm/endtoend_test.go
@@ -38,7 +38,7 @@ func testEndToEnd(t *testing.T, goarch, file string) {
ctxt.IsAsm = true
defer ctxt.Bso.Flush()
failed := false
- ctxt.DiagFunc = func(format string, args ...interface{}) {
+ ctxt.DiagFunc = func(format string, args ...any) {
failed = true
t.Errorf(format, args...)
}
@@ -193,7 +193,7 @@ Diff:
top := pList.Firstpc
var text *obj.LSym
ok = true
- ctxt.DiagFunc = func(format string, args ...interface{}) {
+ ctxt.DiagFunc = func(format string, args ...any) {
t.Errorf(format, args...)
ok = false
}
@@ -294,7 +294,7 @@ func testErrors(t *testing.T, goarch, file string, flags ...string) {
failed := false
var errBuf bytes.Buffer
parser.errorWriter = &errBuf
- ctxt.DiagFunc = func(format string, args ...interface{}) {
+ ctxt.DiagFunc = func(format string, args ...any) {
failed = true
s := fmt.Sprintf(format, args...)
if !strings.HasSuffix(s, "\n") {
@@ -467,6 +467,7 @@ func TestLOONG64Encoder(t *testing.T) {
testEndToEnd(t, "loong64", "loong64enc3")
testEndToEnd(t, "loong64", "loong64enc4")
testEndToEnd(t, "loong64", "loong64enc5")
+ testEndToEnd(t, "loong64", "loong64enc6")
testEndToEnd(t, "loong64", "loong64")
}
diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go
index 8f8f6dcc346..25d596f4d66 100644
--- a/src/cmd/asm/internal/asm/parse.go
+++ b/src/cmd/asm/internal/asm/parse.go
@@ -78,7 +78,7 @@ func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader) *Parser {
// and turn it into a recoverable panic.
var panicOnError bool
-func (p *Parser) errorf(format string, args ...interface{}) {
+func (p *Parser) errorf(format string, args ...any) {
if panicOnError {
panic(fmt.Errorf(format, args...))
}
@@ -90,7 +90,7 @@ func (p *Parser) errorf(format string, args ...interface{}) {
if p.lex != nil {
// Put file and line information on head of message.
format = "%s:%d: " + format + "\n"
- args = append([]interface{}{p.lex.File(), p.lineNum}, args...)
+ args = append([]any{p.lex.File(), p.lineNum}, args...)
}
fmt.Fprintf(p.errorWriter, format, args...)
p.errorCount++
@@ -775,7 +775,7 @@ func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) {
switch p.arch.Family {
case sys.ARM64:
- err := arch.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex)
+ err := arm64.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex)
if err != nil {
p.errorf("%v", err)
}
diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s
index 236f1a66979..773380e9bb6 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64.s
@@ -400,6 +400,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVD $0x11110000, R1 // MOVD $286326784, R1 // 2122a2d2
MOVD $0xaaaa0000aaaa1111, R1 // MOVD $-6149102338357718767, R1 // 212282d24155b5f24155f5f2
MOVD $0x1111ffff1111aaaa, R1 // MOVD $1230045644216969898, R1 // a1aa8a922122a2f22122e2f2
+ MOVD $0xaaaaaaaaaaaaaaab, R1 // MOVD $-6148914691236517205, R1 // e1f301b2615595f2
+ MOVD $0x0ff019940ff00ff0, R1 // MOVD $1148446028692721648, R1 // e19f0cb28132c3f2
MOVD $0, R1 // e1031faa
MOVD $-1, R1 // 01008092
MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2
@@ -630,6 +632,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS F1, 0x44332211(R2) // FMOVS F1, 1144201745(R2)
FMOVD F1, 0x1007000(R2) // FMOVD F1, 16805888(R2)
FMOVD F1, 0x44332211(R2) // FMOVD F1, 1144201745(R2)
+ FMOVQ F1, 0x1003000(R2) // FMOVQ F1, 16789504(R2)
+ FMOVQ F1, 0x44332211(R2) // FMOVQ F1, 1144201745(R2)
MOVB 0x1000000(R1), R2 // MOVB 16777216(R1), R2
MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2
@@ -643,6 +647,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2
FMOVD 0x1000000(R1), F2 // FMOVD 16777216(R1), F2
FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2
+ FMOVQ 0x1000000(R1), F2 // FMOVQ 16777216(R1), F2
+ FMOVQ 0x44332211(R1), F2 // FMOVQ 1144201745(R1), F2
// shifted or extended register offset.
MOVD (R2)(R6.SXTW), R4 // 44c866f8
@@ -1894,4 +1900,12 @@ next:
BTI J // 9f2403d5
BTI JC // df2403d5
+// Pointer Authentication Codes (PAC)
+ PACIASP // 3f2303d5
+ AUTIASP // bf2303d5
+ PACIBSP // 7f2303d5
+ AUTIBSP // ff2303d5
+ AUTIA1716 // 9f2103d5
+ AUTIB1716 // df2103d5
+
END
diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s
index 55890ce3e63..ce88e3ca540 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64error.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64error.s
@@ -422,4 +422,10 @@ TEXT errors(SB),$0
SHA1H V1.B16, V2.B16 // ERROR "invalid operands"
BTI // ERROR "missing operand"
BTI PLDL1KEEP // ERROR "illegal argument"
+ PACIASP C // ERROR "illegal combination"
+ AUTIASP R2 // ERROR "illegal combination"
+ PACIBSP R0 // ERROR "illegal combination"
+ AUTIBSP C // ERROR "illegal combination"
+ AUTIA1716 $45 // ERROR "illegal combination"
+ AUTIB1716 R0 // ERROR "illegal combination"
RET
diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s
index fd86db7a4fc..c820a0a5a10 100644
--- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s
+++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s
@@ -93,8 +93,8 @@ lable2:
MOVV R4, 1(R5) // a404c029
MOVB R4, 1(R5) // a4040029
MOVBU R4, 1(R5) // a4040029
- SC R4, 1(R5) // a4040021
- SCV R4, 1(R5) // a4040023
+ SC R4, 4096(R5) // a4001021
+ SCV R4, 4096(R5) // a4001023
MOVW y+8(FP), R4 // 64408028
MOVWU y+8(FP), R4 // 6440802a
MOVV y+8(FP), R4 // 6440c028
@@ -105,8 +105,8 @@ lable2:
MOVV 1(R5), R4 // a404c028
MOVB 1(R5), R4 // a4040028
MOVBU 1(R5), R4 // a404002a
- LL 1(R5), R4 // a4040020
- LLV 1(R5), R4 // a4040022
+ LL 4096(R5), R4 // a4001020
+ LLV 4096(R5), R4 // a4001022
MOVW $4(R4), R5 // 8510c002
MOVV $4(R4), R5 // 8510c002
MOVW $-1, R4 // 04fcff02
@@ -261,22 +261,18 @@ lable2:
MOVV R4, FCC0 // 80d81401
// LDPTR.{W/D} and STPTR.{W/D} instructions
- MOVWP R5, -32768(R4) // 85008025
MOVWP R5, 32764(R4) // 85fc7f25
MOVWP R5, 32(R4) // 85200025
MOVWP R5, 4(R4) // 85040025
MOVWP R5, (R4) // 85000025
- MOVVP R5, -32768(R4) // 85008027
MOVVP R5, 32764(R4) // 85fc7f27
MOVVP R5, 32(R4) // 85200027
MOVVP R5, 4(R4) // 85040027
MOVVP R5, (R4) // 85000027
- MOVWP -32768(R5), R4 // a4008024
MOVWP 32764(R5), R4 // a4fc7f24
MOVWP 32(R5), R4 // a4200024
MOVWP 4(R5), R4 // a4040024
MOVWP (R5), R4 // a4000024
- MOVVP -32768(R5), R4 // a4008026
MOVVP 32764(R5), R4 // a4fc7f26
MOVVP 32(R5), R4 // a4200026
MOVVP 4(R5), R4 // a4040026
@@ -537,12 +533,18 @@ lable2:
XVMOVQ X28.V[3], X8 // 88ef0377
XVMOVQ X27.V[0], X9 // 69e30377
- //Move vector element to vector.
+ // Move vector element to vector.
VMOVQ V1.B[3], V9.B16 // 298cf772
VMOVQ V2.H[2], V8.H8 // 48c8f772
VMOVQ V3.W[1], V7.W4 // 67e4f772
VMOVQ V4.V[0], V6.V2 // 86f0f772
+ // Move vector register to vector register.
+ VMOVQ V1, V9 // 29002d73
+ VMOVQ V2, V8 // 48002d73
+ XVMOVQ X3, X7 // 67002d77
+ XVMOVQ X4, X6 // 86002d77
+
// Load data from memory and broadcast to each element of a vector register: VMOVQ offset(Rj), .
VMOVQ (R4), V0.B16 // 80008030
VMOVQ 1(R4), V0.B16 // 80048030
@@ -841,6 +843,42 @@ lable2:
XVSUBWU $15, X1, X2 // 223c8d76
XVSUBVU $16, X1, X2 // 22c08d76
+ // [X]VSADD{B,H,W,V}, [X]VSSUB{B,H,W,V} instructions
+ VSADDB V1, V2, V3 // 43044670
+ VSADDH V1, V2, V3 // 43844670
+ VSADDW V1, V2, V3 // 43044770
+ VSADDV V1, V2, V3 // 43844770
+ VSSUBB V1, V2, V3 // 43044870
+ VSSUBH V1, V2, V3 // 43844870
+ VSSUBW V1, V2, V3 // 43044970
+ VSSUBV V1, V2, V3 // 43844970
+ XVSADDB X3, X2, X1 // 410c4674
+ XVSADDH X3, X2, X1 // 418c4674
+ XVSADDW X3, X2, X1 // 410c4774
+ XVSADDV X3, X2, X1 // 418c4774
+ XVSSUBB X3, X2, X1 // 410c4874
+ XVSSUBH X3, X2, X1 // 418c4874
+ XVSSUBW X3, X2, X1 // 410c4974
+ XVSSUBV X3, X2, X1 // 418c4974
+
+ // [X]VSADD{B,H,W,V}U, [X]VSSUB{B,H,W,V}U instructions
+ VSADDBU V1, V2, V3 // 43044a70
+ VSADDHU V1, V2, V3 // 43844a70
+ VSADDWU V1, V2, V3 // 43044b70
+ VSADDVU V1, V2, V3 // 43844b70
+ VSSUBBU V1, V2, V3 // 43044c70
+ VSSUBHU V1, V2, V3 // 43844c70
+ VSSUBWU V1, V2, V3 // 43044d70
+ VSSUBVU V1, V2, V3 // 43844d70
+ XVSADDBU X1, X2, X3 // 43044a74
+	XVSADDHU X1, X2, X3 // 43844a74
+ XVSADDWU X1, X2, X3 // 43044b74
+ XVSADDVU X1, X2, X3 // 43844b74
+ XVSSUBBU X1, X2, X3 // 43044c74
+ XVSSUBHU X1, X2, X3 // 43844c74
+ XVSSUBWU X1, X2, X3 // 43044d74
+ XVSSUBVU X1, X2, X3 // 43844d74
+
// [X]VILV{L/H}{B,H,W,V} instructions
VILVLB V1, V2, V3 // 43041a71
VILVLH V1, V2, V3 // 43841a71
@@ -1021,6 +1059,32 @@ lable2:
XVSHUF4IV $8, X1, X2 // 22209c77
XVSHUF4IV $15, X1, X2 // 223c9c77
+ // [X]VSHUF.{B/H/W/V} instructions
+ VSHUFH V1, V2, V3 // 43847a71
+ VSHUFW V1, V2, V3 // 43047b71
+ VSHUFV V1, V2, V3 // 43847b71
+ XVSHUFH X1, X2, X3 // 43847a75
+ XVSHUFW X1, X2, X3 // 43047b75
+ XVSHUFV X1, X2, X3 // 43847b75
+ VSHUFB V1, V2, V3, V4 // 6488500d
+ XVSHUFB X1, X2, X3, X4 // 6488600d
+
+ // VPERMIW, XVPERMI{W,V,Q} instructions
+ VPERMIW $0x1B, V1, V2 // VPERMIW $27, V1, V2 // 226ce473
+ XVPERMIW $0x2B, X1, X2 // XVPERMIW $43, X1, X2 // 22ace477
+ XVPERMIV $0x3B, X1, X2 // XVPERMIV $59, X1, X2 // 22ece877
+ XVPERMIQ $0x4B, X1, X2 // XVPERMIQ $75, X1, X2 // 222ced77
+
+	// [X]VEXTRINS.{B,H,W,V} instructions
+ VEXTRINSB $0x18, V1, V2 // VEXTRINSB $24, V1, V2 // 22608c73
+ VEXTRINSH $0x27, V1, V2 // VEXTRINSH $39, V1, V2 // 229c8873
+ VEXTRINSW $0x36, V1, V2 // VEXTRINSW $54, V1, V2 // 22d88473
+ VEXTRINSV $0x45, V1, V2 // VEXTRINSV $69, V1, V2 // 22148173
+ XVEXTRINSB $0x54, X1, X2 // XVEXTRINSB $84, X1, X2 // 22508d77
+ XVEXTRINSH $0x63, X1, X2 // XVEXTRINSH $99, X1, X2 // 228c8977
+ XVEXTRINSW $0x72, X1, X2 // XVEXTRINSW $114, X1, X2 // 22c88577
+ XVEXTRINSV $0x81, X1, X2 // XVEXTRINSV $129, X1, X2 // 22048277
+
// [X]VSETEQZ.V, [X]VSETNEZ.V
VSETEQV V1, FCC0 // 20989c72
VSETNEV V1, FCC0 // 209c9c72
diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc3.s b/src/cmd/asm/internal/asm/testdata/loong64enc3.s
index 2d83bd719a5..2dc6529dcb0 100644
--- a/src/cmd/asm/internal/asm/testdata/loong64enc3.s
+++ b/src/cmd/asm/internal/asm/testdata/loong64enc3.s
@@ -42,8 +42,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
MOVB R4, 4096(R5) // 3e000014de971000c4030029
MOVBU R4, 65536(R5) // 1e020014de971000c4030029
MOVBU R4, 4096(R5) // 3e000014de971000c4030029
- SC R4, 65536(R5) // 1e020014de971000c4030021
- SC R4, 4096(R5) // 3e000014de971000c4030021
+ SC R4, 65536(R5) // 1e040010de971000c4030021
+ SCV R4, 65536(R5) // 1e040010de971000c4030023
+ LL 65536(R5), R4 // 1e040010de971000c4030020
+ LLV 65536(R5), R4 // 1e040010de971000c4030022
MOVW y+65540(FP), R4 // 1e020014de8f1000c4338028
MOVWU y+65540(FP), R4 // 1e020014de8f1000c433802a
MOVV y+65540(FP), R4 // 1e020014de8f1000c433c028
@@ -122,6 +124,21 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
XOR $4097, R4 // 3e000014de07800384f81500
XOR $4097, R4, R5 // 3e000014de07800385f81500
+ MOVWP R5, -32768(R4) // 1efcff13de931000c5038025
+ MOVWP R5, 32768(R4) // 1e000010de931000c5038025
+ MOVWP R5, 65536(R4) // 1e040010de931000c5030025
+ MOVWP R5, 1048576(R4) // 1e400010de931000c5030025
+ MOVVP R5, -32768(R4) // 1efcff13de931000c5038027
+ MOVVP R5, 65536(R4) // 1e040010de931000c5030027
+ MOVVP R5, 1048576(R4) // 1e400010de931000c5030027
+ MOVWP -32768(R5), R4 // 1efcff13de971000c4038024
+ MOVWP 2229248(R5), R4 // 1e880010de971000c4030424
+ MOVWP -2145518592(R5), R4 // 1e740012de971000c403fc24
+ MOVVP -32768(R5), R4 // 1efcff13de971000c4038026
+ MOVVP 2229248(R5), R4 // 1e880010de971000c4030426
+ MOVVP -2145518592(R5), R4 // 1e740012de971000c403fc26
+
+
// MOVV C_DCON32_12S, r
MOVV $0x27312345fffff800, R4 // MOVV $2824077224892692480, R4 // 0400a002a468241684cc0903
MOVV $0xf7312345fffff800, R4 // MOVV $-634687288927848448, R4 // 0400a002a468241684cc3d03
diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc6.s b/src/cmd/asm/internal/asm/testdata/loong64enc6.s
new file mode 100644
index 00000000000..bd19ea76012
--- /dev/null
+++ b/src/cmd/asm/internal/asm/testdata/loong64enc6.s
@@ -0,0 +1,12 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../../../../../runtime/textflag.h"
+
+TEXT asmtest(SB),DUPOK|NOSPLIT,$0
+ // MOVWP LOREG_64(Rx), Ry
+ MOVWP 81985529216486896(R4), R5 // 9e571315dec3b703feac6816de4b000384f8100085000025
+ MOVWP -81985529216486896(R4), R5 // 7ea8ec14de4388031e539717deb73f0384f8100085000025
+ MOVWP R4, 81985529216486896(R5) // 9e571315dec3b703feac6816de4b0003a5f81000a4000025
+ MOVWP R4, -81985529216486896(R5) // 7ea8ec14de4388031e539717deb73f03a5f81000a4000025
diff --git a/src/cmd/asm/internal/asm/testdata/loong64error.s b/src/cmd/asm/internal/asm/testdata/loong64error.s
index 2dcd34bf61c..1bc0ddea557 100644
--- a/src/cmd/asm/internal/asm/testdata/loong64error.s
+++ b/src/cmd/asm/internal/asm/testdata/loong64error.s
@@ -7,3 +7,8 @@ TEXT errors(SB),$0
XVSHUF4IV $16, X1, X2 // ERROR "operand out of range 0 to 15"
ADDV16 $1, R4, R5 // ERROR "the constant must be a multiple of 65536."
ADDV16 $65535, R4, R5 // ERROR "the constant must be a multiple of 65536."
+ SC R4, 1(R5) // ERROR "offset must be a multiple of 4."
+ SCV R4, 1(R5) // ERROR "offset must be a multiple of 4."
+ LL 1(R5), R4 // ERROR "offset must be a multiple of 4."
+ LLV 1(R5), R4 // ERROR "offset must be a multiple of 4."
+
diff --git a/src/cmd/asm/internal/asm/testdata/riscv64.s b/src/cmd/asm/internal/asm/testdata/riscv64.s
index 702b82223b3..4615119af00 100644
--- a/src/cmd/asm/internal/asm/testdata/riscv64.s
+++ b/src/cmd/asm/internal/asm/testdata/riscv64.s
@@ -372,6 +372,76 @@ start:
// 21.7: Double-Precision Floating-Point Classify Instruction
FCLASSD F0, X5 // d31200e2
+ //
+ // "C" Extension for Compressed Instructions, Version 2.0
+ //
+
+ // 26.3.1: Compressed Stack-Pointer-Based Loads and Stores
+ CLWSP 20(SP), X10 // 5245
+ CLDSP 24(SP), X10 // 6265
+ CFLDSP 32(SP), F10 // 0235
+ CSWSP X10, 20(SP) // 2aca
+ CSDSP X10, 24(SP) // 2aec
+ CFSDSP F10, 32(SP) // 2ab0
+
+ // 26.3.2: Compressed Register-Based Loads and Stores
+ CLW 20(X10), X11 // 4c49
+ CLD 24(X10), X11 // 0c6d
+ CFLD 32(X10), F11 // 0c31
+ CSW X11, 20(X10) // 4cc9
+ CSD X11, 24(X10) // 0ced
+ CFSD F11, 32(X10) // 0cb1
+
+ // 26.4: Compressed Control Transfer Instructions
+ CJ 1(PC) // 09a0
+ CJR X5 // 8282
+ CJALR X5 // 8292
+ CBEQZ X10, 1(PC) // 09c1
+ CBNEZ X10, 1(PC) // 09e1
+
+ // 26.5.1: Compressed Integer Constant-Generation Instructions
+ CLI $-32, X5 // 8152
+ CLI $31, X5 // fd42
+ CLUI $-32, X5 // 8172
+ CLUI $31, X5 // fd62
+
+ // 26.5.2: Compressed Integer Register-Immediate Operations
+ CADD $-32, X5 // 8112
+ CADD $31, X5 // fd02
+ CADDI $-32, X5 // 8112
+ CADDI $31, X5 // fd02
+ CADDW $-32, X5 // 8132
+ CADDW $31, X5 // fd22
+ CADDIW $-32, X5 // 8132
+ CADDIW $31, X5 // fd22
+ CADDI16SP $-512, SP // 0171
+ CADDI16SP $496, SP // 7d61
+ CADDI4SPN $4, SP, X10 // 4800
+ CADDI4SPN $1020, SP, X10 // e81f
+ CSLLI $63, X5 // fe12
+ CSRLI $63, X10 // 7d91
+ CSRAI $63, X10 // 7d95
+ CAND $-32, X10 // 0199
+ CAND $31, X10 // 7d89
+ CANDI $-32, X10 // 0199
+ CANDI $31, X10 // 7d89
+
+ // 26.5.3: Compressed Integer Register-Register Operations
+ CMV X6, X5 // 9a82
+ CADD X9, X8 // 2694
+ CAND X9, X8 // 658c
+ COR X9, X8 // 458c
+ CXOR X9, X8 // 258c
+ CSUB X9, X8 // 058c
+ CADDW X9, X8 // 259c
+ CSUBW X9, X8 // 059c
+
+ // 26.5.5: Compressed NOP Instruction
+ CNOP // 0100
+
+ // 26.5.6: Compressed Breakpoint Instruction
+ CEBREAK // 0290
+
// 28.4.1: Address Generation Instructions (Zba)
ADDUW X10, X11, X12 // 3b86a508
ADDUW X10, X11 // bb85a508
diff --git a/src/cmd/asm/internal/asm/testdata/riscv64validation.s b/src/cmd/asm/internal/asm/testdata/riscv64validation.s
index 65497659167..6a2e5f92dee 100644
--- a/src/cmd/asm/internal/asm/testdata/riscv64validation.s
+++ b/src/cmd/asm/internal/asm/testdata/riscv64validation.s
@@ -12,6 +12,147 @@ TEXT validation(SB),$0
SRLI $1, X5, F1 // ERROR "expected integer register in rd position but got non-integer register F1"
SRLI $1, F1, X5 // ERROR "expected integer register in rs1 position but got non-integer register F1"
+ WORD $-1 // ERROR "must be in range [0x0, 0xffffffff]"
+ WORD $0x100000000 // ERROR "must be in range [0x0, 0xffffffff]"
+
+ //
+ // "C" Extension for Compressed Instructions, Version 2.0
+ //
+ CLWSP 20(X5), X10 // ERROR "rs2 must be SP/X2"
+ CLWSP 20(SP), X0 // ERROR "cannot use register X0"
+ CLWSP 20(SP), F10 // ERROR "expected integer register in rd position"
+ CLWSP 22(SP), X10 // ERROR "must be a multiple of 4"
+ CLDSP 24(X5), X10 // ERROR "rs2 must be SP/X2"
+ CLDSP 24(SP), X0 // ERROR "cannot use register X0"
+ CLDSP 24(SP), F10 // ERROR "expected integer register in rd position"
+ CLDSP 28(SP), X10 // ERROR "must be a multiple of 8"
+ CFLDSP 32(X5), F10 // ERROR "rs2 must be SP/X2"
+ CFLDSP 32(SP), X10 // ERROR "expected float register in rd position"
+ CFLDSP 36(SP), F10 // ERROR "must be a multiple of 8"
+ CSWSP X10, 20(X5) // ERROR "rd must be SP/X2"
+ CSWSP F10, 20(SP) // ERROR "expected integer register in rs2 position"
+ CSWSP X10, 22(SP) // ERROR "must be a multiple of 4"
+ CSDSP X10, 24(X5) // ERROR "rd must be SP/X2"
+ CSDSP F10, 24(SP) // ERROR "expected integer register in rs2 position"
+ CSDSP X10, 28(SP) // ERROR "must be a multiple of 8"
+ CFSDSP F10, 32(X5) // ERROR "rd must be SP/X2"
+ CFSDSP X10, 32(SP) // ERROR "expected float register in rs2 position"
+ CFSDSP F10, 36(SP) // ERROR "must be a multiple of 8"
+ CLW 20(X10), F11 // ERROR "expected integer prime register in rd position"
+ CLW 20(X5), X11 // ERROR "expected integer prime register in rs1 position"
+ CLW 20(X10), X5 // ERROR "expected integer prime register in rd position"
+ CLW -1(X10), X11 // ERROR "must be in range [0, 127]"
+ CLW 22(X10), X11 // ERROR "must be a multiple of 4"
+ CLW 128(X10), X11 // ERROR "must be in range [0, 127]"
+ CLD 24(X10), F11 // ERROR "expected integer prime register in rd position"
+ CLD 24(X5), X11 // ERROR "expected integer prime register in rs1 position"
+ CLD -1(X10), X11 // ERROR "must be in range [0, 255]"
+ CLD 30(X10), X11 // ERROR "must be a multiple of 8"
+ CLD 256(X10), X11 // ERROR "must be in range [0, 255]"
+ CFLD 32(X10), X11 // ERROR "expected float prime register in rd position"
+ CFLD 32(X5), F11 // ERROR "expected integer prime register in rs1 position"
+ CFLD -1(X10), F11 // ERROR "must be in range [0, 255]"
+ CFLD 34(X10), F11 // ERROR "must be a multiple of 8"
+ CFLD 256(X10), F11 // ERROR "must be in range [0, 255]"
+ CSW F11, 20(X10) // ERROR "expected integer prime register in rs2 position"
+ CSW X11, -1(X10) // ERROR "must be in range [0, 127]"
+ CSW X11, 22(X10) // ERROR "must be a multiple of 4"
+ CSW X11, 128(X10) // ERROR "must be in range [0, 127]"
+ CSD F11, 24(X10) // ERROR "expected integer prime register in rs2 position"
+ CSD X11, -1(X10) // ERROR "must be in range [0, 255]"
+ CSD X11, 28(X10) // ERROR "must be a multiple of 8"
+ CSD X11, 256(X10) // ERROR "must be in range [0, 255]"
+ CFSD X11, 32(X10) // ERROR "expected float prime register in rs2 position"
+ CFSD F11, -1(X10) // ERROR "must be in range [0, 255]"
+ CFSD F11, 36(X10) // ERROR "must be a multiple of 8"
+ CFSD F11, 256(X10) // ERROR "must be in range [0, 255]"
+ CJR X0 // ERROR "cannot use register X0 in rs1"
+ CJR X10, X11 // ERROR "expected no register in rs2"
+ CJALR X0 // ERROR "cannot use register X0 in rs1"
+ CJALR X10, X11 // ERROR "expected no register in rd"
+ CBEQZ X5, 1(PC) // ERROR "expected integer prime register in rs1"
+ CBNEZ X5, 1(PC) // ERROR "expected integer prime register in rs1"
+ CLI $3, X0 // ERROR "cannot use register X0 in rd"
+ CLI $-33, X5 // ERROR "must be in range [-32, 31]"
+ CLI $32, X5 // ERROR "must be in range [-32, 31]"
+ CLUI $0, X5 // ERROR "immediate cannot be zero"
+ CLUI $3, X0 // ERROR "cannot use register X0 in rd"
+ CLUI $3, X2 // ERROR "cannot use register SP/X2 in rd"
+ CLUI $-33, X5 // ERROR "must be in range [-32, 31]"
+ CLUI $32, X5 // ERROR "must be in range [-32, 31]"
+ CADD $31, X5, X6 // ERROR "rd must be the same as rs1"
+ CADD $-33, X5 // ERROR "must be in range [-32, 31]"
+ CADD $32, X5 // ERROR "must be in range [-32, 31]"
+ CADDI $0, X5 // ERROR "immediate cannot be zero"
+ CADDI $31, X5, X6 // ERROR "rd must be the same as rs1"
+ CADDI $-33, X5 // ERROR "must be in range [-32, 31]"
+ CADDI $32, X5 // ERROR "must be in range [-32, 31]"
+ CADDW $-33, X5 // ERROR "must be in range [-32, 31]"
+ CADDW $32, X5 // ERROR "must be in range [-32, 31]"
+ CADDIW $-33, X5 // ERROR "must be in range [-32, 31]"
+ CADDIW $32, X5 // ERROR "must be in range [-32, 31]"
+ CADDI16SP $0, SP // ERROR "immediate cannot be zero"
+ CADDI16SP $16, X5 // ERROR "rd must be SP/X2"
+ CADDI16SP $-513, SP // ERROR "must be in range [-512, 511]"
+ CADDI16SP $20, SP // ERROR "must be a multiple of 16"
+ CADDI16SP $512, SP // ERROR "must be in range [-512, 511]"
+ CADDI4SPN $4, SP, X5 // ERROR "expected integer prime register in rd"
+ CADDI4SPN $4, X5, X10 // ERROR "SP/X2 must be in rs1"
+ CADDI4SPN $-1, SP, X10 // ERROR "must be in range [0, 1023]"
+ CADDI4SPN $0, SP, X10 // ERROR "immediate cannot be zero"
+ CADDI4SPN $6, SP, X10 // ERROR "must be a multiple of 4"
+ CADDI4SPN $1024, SP, X10 // ERROR "must be in range [0, 1023]"
+ CSLLI $63, X5, X6 // ERROR "rd must be the same as rs1"
+ CSLLI $-1, X5 // ERROR "must be in range [0, 63]"
+ CSLLI $0, X5 // ERROR "immediate cannot be zero"
+ CSLLI $64, X5 // ERROR "must be in range [0, 63]"
+ CSRLI $63, X10, X11 // ERROR "rd must be the same as rs1"
+ CSRLI $63, X5 // ERROR "expected integer prime register in rd"
+ CSRLI $-1, X10 // ERROR "must be in range [0, 63]"
+ CSRLI $0, X10 // ERROR "immediate cannot be zero"
+ CSRLI $64, X10 // ERROR "must be in range [0, 63]"
+ CSRAI $63, X10, X11 // ERROR "rd must be the same as rs1"
+ CSRAI $63, X5 // ERROR "expected integer prime register in rd"
+ CSRAI $-1, X10 // ERROR "must be in range [0, 63]"
+ CSRAI $0, X10 // ERROR "immediate cannot be zero"
+ CSRAI $64, X10 // ERROR "must be in range [0, 63]"
+ CAND $1, X10, X11 // ERROR "rd must be the same as rs1"
+ CAND $1, X5 // ERROR "expected integer prime register in rd"
+ CAND $-64, X10 // ERROR "must be in range [-32, 31]"
+ CAND $63, X10 // ERROR "must be in range [-32, 31]"
+ CANDI $1, X10, X11 // ERROR "rd must be the same as rs1"
+ CANDI $1, X5 // ERROR "expected integer prime register in rd"
+ CANDI $-64, X10 // ERROR "must be in range [-32, 31]"
+ CANDI $63, X10 // ERROR "must be in range [-32, 31]"
+ CMV X0, X5 // ERROR "cannot use register X0 in rs2"
+ CMV X5, X6, X7 // ERROR "expected no register in rs1"
+ CMV X5, X0 // ERROR "cannot use register X0 in rd"
+ CMV F1, X5 // ERROR "expected integer register in rs2"
+ CMV X5, F1 // ERROR "expected integer register in rd"
+ CADD X5, X6, X7 // ERROR "rd must be the same as rs1"
+ CADD X0, X8 // ERROR "cannot use register X0 in rs2"
+ CADD X8, X0 // ERROR "cannot use register X0 in rd"
+ CAND X10, X11, X12 // ERROR "rd must be the same as rs1"
+ CAND X5, X11 // ERROR "expected integer prime register in rs2"
+ CAND X10, X5 // ERROR "expected integer prime register in rd"
+ COR X10, X11, X12 // ERROR "rd must be the same as rs1"
+ COR X5, X11 // ERROR "expected integer prime register in rs2"
+ COR X10, X5 // ERROR "expected integer prime register in rd"
+ CXOR X10, X11, X12 // ERROR "rd must be the same as rs1"
+ CXOR X5, X11 // ERROR "expected integer prime register in rs2"
+ CXOR X10, X5 // ERROR "expected integer prime register in rd"
+ CSUB X10, X11, X12 // ERROR "rd must be the same as rs1"
+ CSUB X5, X11 // ERROR "expected integer prime register in rs2"
+ CSUB X10, X5 // ERROR "expected integer prime register in rd"
+ CADDW X10, X11, X12 // ERROR "rd must be the same as rs1"
+ CADDW X5, X11 // ERROR "expected integer prime register in rs2"
+ CADDW X10, X5 // ERROR "expected integer prime register in rd"
+ CSUBW X10, X11, X12 // ERROR "rd must be the same as rs1"
+ CSUBW X5, X11 // ERROR "expected integer prime register in rs2"
+ CSUBW X10, X5 // ERROR "expected integer prime register in rd"
+ CNOP X10 // ERROR "expected no register in rs2"
+ CEBREAK X10 // ERROR "expected no register in rs2"
+
//
// "V" Standard Extension for Vector Operations, Version 1.0
//
diff --git a/src/cmd/asm/internal/lex/input.go b/src/cmd/asm/internal/lex/input.go
index 789e229a779..342ac5ac483 100644
--- a/src/cmd/asm/internal/lex/input.go
+++ b/src/cmd/asm/internal/lex/input.go
@@ -68,7 +68,7 @@ func predefine(defines flags.MultiFlag) map[string]*Macro {
var panicOnError bool // For testing.
-func (in *Input) Error(args ...interface{}) {
+func (in *Input) Error(args ...any) {
if panicOnError {
panic(fmt.Errorf("%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...)))
}
@@ -77,7 +77,7 @@ func (in *Input) Error(args ...interface{}) {
}
// expectText is like Error but adds "got XXX" where XXX is a quoted representation of the most recent token.
-func (in *Input) expectText(args ...interface{}) {
+func (in *Input) expectText(args ...any) {
in.Error(append(args, "; got", strconv.Quote(in.Stack.Text()))...)
}
diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go
index 2a9ebe9b3e2..f2697db5169 100644
--- a/src/cmd/asm/main.go
+++ b/src/cmd/asm/main.go
@@ -58,7 +58,7 @@ func main() {
// nothing
case "index":
// known to compiler; ignore here so people can use
- // the same list with -gcflags=-spectre=LIST and -asmflags=-spectrre=LIST
+ // the same list with -gcflags=-spectre=LIST and -asmflags=-spectre=LIST
case "all", "ret":
ctxt.Retpoline = true
}
@@ -93,7 +93,7 @@ func main() {
for _, f := range flag.Args() {
lexer := lex.NewLexer(f)
parser := asm.NewParser(ctxt, architecture, lexer)
- ctxt.DiagFunc = func(format string, args ...interface{}) {
+ ctxt.DiagFunc = func(format string, args ...any) {
diag = true
log.Printf(format, args...)
}
diff --git a/src/cmd/cgo/ast.go b/src/cmd/cgo/ast.go
index 861479db7ac..2da6ca5a30f 100644
--- a/src/cmd/cgo/ast.go
+++ b/src/cmd/cgo/ast.go
@@ -199,7 +199,7 @@ func commentText(g *ast.CommentGroup) string {
return strings.Join(pieces, "")
}
-func (f *File) validateIdents(x interface{}, context astContext) {
+func (f *File) validateIdents(x any, context astContext) {
if x, ok := x.(*ast.Ident); ok {
if f.isMangledName(x.Name) {
error_(x.Pos(), "identifier %q may conflict with identifiers generated by cgo", x.Name)
@@ -208,7 +208,7 @@ func (f *File) validateIdents(x interface{}, context astContext) {
}
// Save various references we are going to need later.
-func (f *File) saveExprs(x interface{}, context astContext) {
+func (f *File) saveExprs(x any, context astContext) {
switch x := x.(type) {
case *ast.Expr:
switch (*x).(type) {
@@ -278,7 +278,7 @@ func (f *File) saveCall(call *ast.CallExpr, context astContext) {
}
// If a function should be exported add it to ExpFunc.
-func (f *File) saveExport(x interface{}, context astContext) {
+func (f *File) saveExport(x any, context astContext) {
n, ok := x.(*ast.FuncDecl)
if !ok {
return
@@ -318,7 +318,7 @@ func (f *File) saveExport(x interface{}, context astContext) {
}
// Make f.ExpFunc[i] point at the Func from this AST instead of the other one.
-func (f *File) saveExport2(x interface{}, context astContext) {
+func (f *File) saveExport2(x any, context astContext) {
n, ok := x.(*ast.FuncDecl)
if !ok {
return
@@ -355,7 +355,7 @@ const (
)
// walk walks the AST x, calling visit(f, x, context) for each node.
-func (f *File) walk(x interface{}, context astContext, visit func(*File, interface{}, astContext)) {
+func (f *File) walk(x any, context astContext, visit func(*File, any, astContext)) {
visit(f, x, context)
switch n := x.(type) {
case *ast.Expr:
@@ -363,7 +363,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa
// everything else just recurs
default:
- f.walkUnexpected(x, context, visit)
+ error_(token.NoPos, "unexpected type %T in walk", x)
+ panic("unexpected type")
case nil:
@@ -396,6 +397,9 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa
case *ast.IndexExpr:
f.walk(&n.X, ctxExpr, visit)
f.walk(&n.Index, ctxExpr, visit)
+ case *ast.IndexListExpr:
+ f.walk(&n.X, ctxExpr, visit)
+ f.walk(n.Indices, ctxExpr, visit)
case *ast.SliceExpr:
f.walk(&n.X, ctxExpr, visit)
if n.Low != nil {
@@ -434,8 +438,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa
case *ast.StructType:
f.walk(n.Fields, ctxField, visit)
case *ast.FuncType:
- if tparams := funcTypeTypeParams(n); tparams != nil {
- f.walk(tparams, ctxParam, visit)
+ if n.TypeParams != nil {
+ f.walk(n.TypeParams, ctxParam, visit)
}
f.walk(n.Params, ctxParam, visit)
if n.Results != nil {
@@ -524,8 +528,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa
f.walk(n.Values, ctxExpr, visit)
}
case *ast.TypeSpec:
- if tparams := typeSpecTypeParams(n); tparams != nil {
- f.walk(tparams, ctxParam, visit)
+ if n.TypeParams != nil {
+ f.walk(n.TypeParams, ctxParam, visit)
}
f.walk(&n.Type, ctxType, visit)
diff --git a/src/cmd/cgo/ast_go1.go b/src/cmd/cgo/ast_go1.go
deleted file mode 100644
index 2f65f0f7183..00000000000
--- a/src/cmd/cgo/ast_go1.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build compiler_bootstrap
-
-package main
-
-import (
- "go/ast"
- "go/token"
-)
-
-func (f *File) walkUnexpected(x interface{}, context astContext, visit func(*File, interface{}, astContext)) {
- error_(token.NoPos, "unexpected type %T in walk", x)
- panic("unexpected type")
-}
-
-func funcTypeTypeParams(n *ast.FuncType) *ast.FieldList {
- return nil
-}
-
-func typeSpecTypeParams(n *ast.TypeSpec) *ast.FieldList {
- return nil
-}
diff --git a/src/cmd/cgo/ast_go118.go b/src/cmd/cgo/ast_go118.go
deleted file mode 100644
index ced30728dc9..00000000000
--- a/src/cmd/cgo/ast_go118.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !compiler_bootstrap
-
-package main
-
-import (
- "go/ast"
- "go/token"
-)
-
-func (f *File) walkUnexpected(x interface{}, context astContext, visit func(*File, interface{}, astContext)) {
- switch n := x.(type) {
- default:
- error_(token.NoPos, "unexpected type %T in walk", x)
- panic("unexpected type")
-
- case *ast.IndexListExpr:
- f.walk(&n.X, ctxExpr, visit)
- f.walk(n.Indices, ctxExpr, visit)
- }
-}
-
-func funcTypeTypeParams(n *ast.FuncType) *ast.FieldList {
- return n.TypeParams
-}
-
-func typeSpecTypeParams(n *ast.TypeSpec) *ast.FieldList {
- return n.TypeParams
-}
diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go
index ef5272299bb..7e8486874ef 100644
--- a/src/cmd/cgo/doc.go
+++ b/src/cmd/cgo/doc.go
@@ -127,7 +127,7 @@ environment variable when running the go tool: set it to 1 to enable
the use of cgo, and to 0 to disable it. The go tool will set the
build constraint "cgo" if cgo is enabled. The special import "C"
implies the "cgo" build constraint, as though the file also said
-"//go:build cgo". Therefore, if cgo is disabled, files that import
+"//go:build cgo". Therefore, if cgo is disabled, files that import
"C" will not be built by the go tool. (For more about build constraints
see https://golang.org/pkg/go/build/#hdr-Build_Constraints).
diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go
index 6c1695bdb0a..d3de3906b48 100644
--- a/src/cmd/cgo/gcc.go
+++ b/src/cmd/cgo/gcc.go
@@ -1056,7 +1056,7 @@ func (p *Package) rewriteCall(f *File, call *Call) (string, bool) {
func (p *Package) needsPointerCheck(f *File, t ast.Expr, arg ast.Expr) bool {
// An untyped nil does not need a pointer check, and when
// _cgoCheckPointer returns the untyped nil the type assertion we
- // are going to insert will fail. Easier to just skip nil arguments.
+ // are going to insert will fail. Easier to just skip nil arguments.
// TODO: Note that this fails if nil is shadowed.
if id, ok := arg.(*ast.Ident); ok && id.Name == "nil" {
return false
@@ -1158,7 +1158,7 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
// If addPosition is true, add position info to the idents of C names in arg.
func (p *Package) mangle(f *File, arg *ast.Expr, addPosition bool) (ast.Expr, bool) {
needsUnsafe := false
- f.walk(arg, ctxExpr, func(f *File, arg interface{}, context astContext) {
+ f.walk(arg, ctxExpr, func(f *File, arg any, context astContext) {
px, ok := arg.(*ast.Expr)
if !ok {
return
@@ -2154,7 +2154,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
for _, s := range f.Symbols {
switch {
case isDebugInts(s.Name):
- if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
+ if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
@@ -2167,7 +2167,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
}
}
case isDebugFloats(s.Name):
- if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
+ if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
@@ -2181,7 +2181,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
- if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
+ if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
@@ -2193,7 +2193,7 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
- if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
+ if i := s.SectionNumber - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
@@ -2439,7 +2439,7 @@ func (tr *TypeRepr) Empty() bool {
// Set modifies the type representation.
// If fargs are provided, repr is used as a format for fmt.Sprintf.
// Otherwise, repr is used unprocessed as the type representation.
-func (tr *TypeRepr) Set(repr string, fargs ...interface{}) {
+func (tr *TypeRepr) Set(repr string, fargs ...any) {
tr.Repr = repr
tr.FormatArgs = fargs
}
@@ -2713,7 +2713,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
// so execute the basic things that the struct case would do
// other than try to determine a Go representation.
tt := *t
- tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}}
+ tt.C = &TypeRepr{"%s %s", []any{dt.Kind, tag}}
// We don't know what the representation of this struct is, so don't let
// anyone allocate one on the Go side. As a side effect of this annotation,
// pointers to this type will not be considered pointers in Go. They won't
@@ -2743,7 +2743,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
t.Align = align
tt := *t
if tag != "" {
- tt.C = &TypeRepr{"struct %s", []interface{}{tag}}
+ tt.C = &TypeRepr{"struct %s", []any{tag}}
}
tt.Go = g
if c.incompleteStructs[tag] {
@@ -3010,7 +3010,7 @@ func (c *typeConv) FuncType(dtype *dwarf.FuncType, pos token.Pos) *FuncType {
for i, f := range dtype.ParamType {
// gcc's DWARF generator outputs a single DotDotDotType parameter for
// function pointers that specify no parameters (e.g. void
- // (*__cgo_0)()). Treat this special case as void. This case is
+ // (*__cgo_0)()). Treat this special case as void. This case is
// invalid according to ISO C anyway (i.e. void (*__cgo_1)(...) is not
// legal).
if _, ok := f.(*dwarf.DotDotDotType); ok && i == 0 {
@@ -3081,7 +3081,7 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct
off := int64(0)
// Rename struct fields that happen to be named Go keywords into
- // _{keyword}. Create a map from C ident -> Go ident. The Go ident will
+ // _{keyword}. Create a map from C ident -> Go ident. The Go ident will
// be mangled. Any existing identifier that already has the same name on
// the C-side will cause the Go-mangled version to be prefixed with _.
// (e.g. in a struct with fields '_type' and 'type', the latter would be
@@ -3309,7 +3309,7 @@ func godefsFields(fld []*ast.Field) {
// fieldPrefix returns the prefix that should be removed from all the
// field names when generating the C or Go code. For generated
// C, we leave the names as is (tv_sec, tv_usec), since that's what
-// people are used to seeing in C. For generated Go code, such as
+// people are used to seeing in C. For generated Go code, such as
// package syscall's data structures, we drop a common prefix
// (so sec, usec, which will get turned into Sec, Usec for exporting).
func fieldPrefix(fld []*ast.Field) string {
@@ -3456,7 +3456,7 @@ func (c *typeConv) badCFType(dt *dwarf.TypedefType) bool {
// Tagged pointer support
// Low-bit set means tagged object, next 3 bits (currently)
// define the tagged object class, next 4 bits are for type
-// information for the specific tagged object class. Thus,
+// information for the specific tagged object class. Thus,
// the low byte is for type info, and the rest of a pointer
// (32 or 64-bit) is for payload, whatever the tagged class.
//
diff --git a/src/cmd/cgo/godefs.go b/src/cmd/cgo/godefs.go
index 9cf626c1732..93f90271571 100644
--- a/src/cmd/cgo/godefs.go
+++ b/src/cmd/cgo/godefs.go
@@ -117,7 +117,7 @@ func (p *Package) godefs(f *File, args []string) string {
var gofmtBuf strings.Builder
// gofmt returns the gofmt-formatted string for an AST node.
-func gofmt(n interface{}) string {
+func gofmt(n any) string {
gofmtBuf.Reset()
err := printer.Fprint(&gofmtBuf, fset, n)
if err != nil {
diff --git a/src/cmd/cgo/internal/swig/swig_test.go b/src/cmd/cgo/internal/swig/swig_test.go
index 603dab4917c..9d5ea0051ac 100644
--- a/src/cmd/cgo/internal/swig/swig_test.go
+++ b/src/cmd/cgo/internal/swig/swig_test.go
@@ -80,7 +80,7 @@ func mustHaveCxx(t *testing.T) {
if len(args) == 0 {
t.Skip("no C++ compiler")
}
- testenv.MustHaveExecPath(t, string(args[0]))
+ testenv.MustHaveExecPath(t, args[0])
}
var (
diff --git a/src/cmd/cgo/internal/test/buildid_linux.go b/src/cmd/cgo/internal/test/buildid_linux.go
index 84d3edb664e..7e0fd0fd126 100644
--- a/src/cmd/cgo/internal/test/buildid_linux.go
+++ b/src/cmd/cgo/internal/test/buildid_linux.go
@@ -4,9 +4,9 @@
package cgotest
-// Test that we have no more than one build ID. In the past we used
+// Test that we have no more than one build ID. In the past we used
// to generate a separate build ID for each package using cgo, and the
-// linker concatenated them all. We don't want that--we only want
+// linker concatenated them all. We don't want that--we only want
// one.
import (
@@ -42,7 +42,7 @@ sections:
for len(d) > 0 {
// ELF standards differ as to the sizes in
- // note sections. Both the GNU linker and
+ // note sections. Both the GNU linker and
// gold always generate 32-bit sizes, so that
// is what we assume here.
diff --git a/src/cmd/cgo/internal/test/callback.go b/src/cmd/cgo/internal/test/callback.go
index 478bf8294af..8f8dd8fded6 100644
--- a/src/cmd/cgo/internal/test/callback.go
+++ b/src/cmd/cgo/internal/test/callback.go
@@ -40,7 +40,7 @@ func nestedCall(f func()) {
callbackMutex.Unlock()
// Pass the address of i because the C function was written to
- // take a pointer. We could pass an int if we felt like
+ // take a pointer. We could pass an int if we felt like
// rewriting the C code.
C.callback(unsafe.Pointer(&i))
diff --git a/src/cmd/cgo/internal/test/cgo_darwin_test.go b/src/cmd/cgo/internal/test/cgo_darwin_test.go
new file mode 100644
index 00000000000..5d9d1640021
--- /dev/null
+++ b/src/cmd/cgo/internal/test/cgo_darwin_test.go
@@ -0,0 +1,11 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo && darwin
+
+package cgotest
+
+import "testing"
+
+func TestIssue76023(t *testing.T) { issue76023(t) }
diff --git a/src/cmd/cgo/internal/test/gcc68255/a.go b/src/cmd/cgo/internal/test/gcc68255/a.go
index e106dee3ec0..cc4804b90bd 100644
--- a/src/cmd/cgo/internal/test/gcc68255/a.go
+++ b/src/cmd/cgo/internal/test/gcc68255/a.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Test that it's OK to have C code that does nothing other than
-// initialize a global variable. This used to fail with gccgo.
+// initialize a global variable. This used to fail with gccgo.
package gcc68255
diff --git a/src/cmd/cgo/internal/test/issue76023.go b/src/cmd/cgo/internal/test/issue76023.go
new file mode 100644
index 00000000000..7fe8ae53f7e
--- /dev/null
+++ b/src/cmd/cgo/internal/test/issue76023.go
@@ -0,0 +1,27 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+
+package cgotest
+
+/*
+#cgo LDFLAGS: -Wl,-undefined,dynamic_lookup
+
+extern void __gotest_cgo_null_api(void) __attribute__((weak_import));
+
+int issue76023(void) {
+ if (__gotest_cgo_null_api) return 1;
+ return 0;
+}
+*/
+import "C"
+import "testing"
+
+func issue76023(t *testing.T) {
+ r := C.issue76023()
+ if r != 0 {
+ t.Error("found __gotest_cgo_null_api")
+ }
+}
diff --git a/src/cmd/cgo/internal/testcarchive/carchive_test.go b/src/cmd/cgo/internal/testcarchive/carchive_test.go
index c0ad79f2314..7c6e2b1d848 100644
--- a/src/cmd/cgo/internal/testcarchive/carchive_test.go
+++ b/src/cmd/cgo/internal/testcarchive/carchive_test.go
@@ -102,14 +102,14 @@ func testMain(m *testing.M) int {
bin = cmdToRun("./testp")
ccOut := goEnv("CC")
- cc = []string{string(ccOut)}
+ cc = []string{ccOut}
out := goEnv("GOGCCFLAGS")
quote := '\000'
start := 0
lastSpace := true
backslash := false
- s := string(out)
+ s := out
for i, c := range s {
if quote == '\000' && unicode.IsSpace(c) {
if !lastSpace {
diff --git a/src/cmd/cgo/internal/testcshared/cshared_test.go b/src/cmd/cgo/internal/testcshared/cshared_test.go
index 2ce705adba4..c01f5cf2cfb 100644
--- a/src/cmd/cgo/internal/testcshared/cshared_test.go
+++ b/src/cmd/cgo/internal/testcshared/cshared_test.go
@@ -76,7 +76,7 @@ func testMain(m *testing.M) int {
start := 0
lastSpace := true
backslash := false
- s := string(out)
+ s := out
for i, c := range s {
if quote == '\000' && unicode.IsSpace(c) {
if !lastSpace {
diff --git a/src/cmd/cgo/internal/testerrors/badsym_test.go b/src/cmd/cgo/internal/testerrors/badsym_test.go
index 4fd5c44505a..756ffdb1fc7 100644
--- a/src/cmd/cgo/internal/testerrors/badsym_test.go
+++ b/src/cmd/cgo/internal/testerrors/badsym_test.go
@@ -186,7 +186,7 @@ func cCompilerCmd(t *testing.T) []string {
start := 0
lastSpace := true
backslash := false
- s := string(out)
+ s := out
for i, c := range s {
if quote == '\000' && unicode.IsSpace(c) {
if !lastSpace {
diff --git a/src/cmd/cgo/internal/testerrors/ptr_test.go b/src/cmd/cgo/internal/testerrors/ptr_test.go
index beba0d26ac1..bc1cc1c6e08 100644
--- a/src/cmd/cgo/internal/testerrors/ptr_test.go
+++ b/src/cmd/cgo/internal/testerrors/ptr_test.go
@@ -14,6 +14,7 @@ import (
"os"
"os/exec"
"path/filepath"
+ "regexp"
"slices"
"strings"
"sync/atomic"
@@ -24,15 +25,16 @@ var tmp = flag.String("tmp", "", "use `dir` for temporary files and do not clean
// ptrTest is the tests without the boilerplate.
type ptrTest struct {
- name string // for reporting
- c string // the cgo comment
- c1 string // cgo comment forced into non-export cgo file
- imports []string // a list of imports
- support string // supporting functions
- body string // the body of the main function
- extra []extra // extra files
- fail bool // whether the test should fail
- expensive bool // whether the test requires the expensive check
+ name string // for reporting
+ c string // the cgo comment
+ c1 string // cgo comment forced into non-export cgo file
+ imports []string // a list of imports
+ support string // supporting functions
+ body string // the body of the main function
+ extra []extra // extra files
+ fail bool // whether the test should fail
+ expensive bool // whether the test requires the expensive check
+ errTextRegexp string // error text regexp; if empty, use the pattern `.*unpinned Go.*`
}
type extra struct {
@@ -489,6 +491,27 @@ var ptrTests = []ptrTest{
body: `i := 0; a := &[2]unsafe.Pointer{nil, unsafe.Pointer(&i)}; C.f45(&a[0])`,
fail: true,
},
+ {
+ // Passing a Go map as argument to C.
+ name: "argmap",
+ c: `void f46(void* p) {}`,
+ imports: []string{"unsafe"},
+ body: `m := map[int]int{0: 1,}; C.f46(unsafe.Pointer(&m))`,
+ fail: true,
+ errTextRegexp: `.*argument of cgo function has Go pointer to unpinned Go map`,
+ },
+ {
+ // Returning a Go map to C.
+ name: "retmap",
+ c: `extern void f47();`,
+ support: `//export GoMap47
+ func GoMap47() map[int]int { return map[int]int{0: 1,} }`,
+ body: `C.f47()`,
+ c1: `extern void* GoMap47();
+ void f47() { GoMap47(); }`,
+ fail: true,
+ errTextRegexp: `.*result of Go function GoMap47 called from cgo is unpinned Go map or points to unpinned Go map.*`,
+ },
}
func TestPointerChecks(t *testing.T) {
@@ -519,7 +542,6 @@ func TestPointerChecks(t *testing.T) {
// after testOne finishes.
var pending int32
for _, pt := range ptrTests {
- pt := pt
t.Run(pt.name, func(t *testing.T) {
atomic.AddInt32(&pending, +1)
defer func() {
@@ -690,11 +712,17 @@ func testOne(t *testing.T, pt ptrTest, exe, exe2 string) {
}
buf, err := runcmd(cgocheck)
+
+ var pattern string = pt.errTextRegexp
+ if pt.errTextRegexp == "" {
+ pattern = `.*unpinned Go.*`
+ }
+
if pt.fail {
if err == nil {
t.Logf("%s", buf)
t.Fatalf("did not fail as expected")
- } else if !bytes.Contains(buf, []byte("Go pointer")) {
+ } else if ok, _ := regexp.Match(pattern, buf); !ok {
t.Logf("%s", buf)
t.Fatalf("did not print expected error (failed with %v)", err)
}
diff --git a/src/cmd/cgo/internal/testout/out_test.go b/src/cmd/cgo/internal/testout/out_test.go
index 81dfa365871..e8ea5092a35 100644
--- a/src/cmd/cgo/internal/testout/out_test.go
+++ b/src/cmd/cgo/internal/testout/out_test.go
@@ -8,8 +8,8 @@ import (
"bufio"
"bytes"
"fmt"
- "internal/testenv"
"internal/goarch"
+ "internal/testenv"
"os"
"path/filepath"
"regexp"
diff --git a/src/cmd/cgo/internal/testplugin/plugin_test.go b/src/cmd/cgo/internal/testplugin/plugin_test.go
index 2afb542ec4f..3216073edbc 100644
--- a/src/cmd/cgo/internal/testplugin/plugin_test.go
+++ b/src/cmd/cgo/internal/testplugin/plugin_test.go
@@ -37,7 +37,7 @@ func TestMain(m *testing.M) {
var tmpDir string
// prettyPrintf prints lines with tmpDir sanitized.
-func prettyPrintf(format string, args ...interface{}) {
+func prettyPrintf(format string, args ...any) {
s := fmt.Sprintf(format, args...)
if tmpDir != "" {
s = strings.ReplaceAll(s, tmpDir, "$TMPDIR")
diff --git a/src/cmd/cgo/internal/teststdio/testdata/fib.go b/src/cmd/cgo/internal/teststdio/testdata/fib.go
index 96173683353..69147880c20 100644
--- a/src/cmd/cgo/internal/teststdio/testdata/fib.go
+++ b/src/cmd/cgo/internal/teststdio/testdata/fib.go
@@ -5,7 +5,7 @@
//go:build test_run
// Compute Fibonacci numbers with two goroutines
-// that pass integers back and forth. No actual
+// that pass integers back and forth. No actual
// concurrency, just threads and synchronization
// and foreign code on multiple pthreads.
diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go
index 5e08427daf9..ba8e52a6e02 100644
--- a/src/cmd/cgo/main.go
+++ b/src/cmd/cgo/main.go
@@ -72,8 +72,8 @@ type File struct {
ExpFunc []*ExpFunc // exported functions for this file
Name map[string]*Name // map from Go name to Name
NamePos map[*Name]token.Pos // map from Name to position of the first reference
- NoCallbacks map[string]bool // C function names that with #cgo nocallback directive
- NoEscapes map[string]bool // C function names that with #cgo noescape directive
+ NoCallbacks map[string]bool // C function names with #cgo nocallback directive
+ NoEscapes map[string]bool // C function names with #cgo noescape directive
Edit *edit.Buffer
debugs []*debug // debug data from iterations of gccDebug. Initialized by File.loadDebug.
@@ -148,7 +148,7 @@ type ExpFunc struct {
// A TypeRepr contains the string representation of a type.
type TypeRepr struct {
Repr string
- FormatArgs []interface{}
+ FormatArgs []any
}
// A Type collects information about a type in both the C and Go worlds.
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index a2bcdf89c5a..701a8530ffc 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -649,13 +649,15 @@ func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) {
if p.noEscapes[n.C] && p.noCallbacks[n.C] {
touchFunc = "_Cgo_keepalive"
}
- fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n")
- if d.Type.Params != nil {
+
+ if len(paramnames) > 0 {
+ fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n")
for _, name := range paramnames {
fmt.Fprintf(fgo2, "\t\t%s(%s)\n", touchFunc, name)
}
+ fmt.Fprintf(fgo2, "\t}\n")
}
- fmt.Fprintf(fgo2, "\t}\n")
+
fmt.Fprintf(fgo2, "\treturn\n")
fmt.Fprintf(fgo2, "}\n")
}
@@ -951,7 +953,7 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
npad := 0
// the align is at least 1 (for char)
maxAlign := int64(1)
- argField := func(typ ast.Expr, namePat string, args ...interface{}) {
+ argField := func(typ ast.Expr, namePat string, args ...any) {
name := fmt.Sprintf(namePat, args...)
t := p.cgoType(typ)
if off%t.Align != 0 {
@@ -1144,6 +1146,10 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
if !p.hasPointer(nil, atype, false) {
return
}
+
+ // Use the exported function's file/line in error messages.
+ pos := fset.Position(exp.Func.Pos())
+ fmt.Fprintf(fgo2, "//line %s:%d\n", pos.Filename, pos.Line)
fmt.Fprintf(fgo2, "\t_cgoCheckResult(a.r%d)\n", i)
})
}
@@ -1406,7 +1412,7 @@ func forFieldList(fl *ast.FieldList, fn func(int, string, ast.Expr)) {
}
}
-func c(repr string, args ...interface{}) *TypeRepr {
+func c(repr string, args ...any) *TypeRepr {
return &TypeRepr{repr, args}
}
diff --git a/src/cmd/cgo/util.go b/src/cmd/cgo/util.go
index 23b4a414db7..e83634ffb22 100644
--- a/src/cmd/cgo/util.go
+++ b/src/cmd/cgo/util.go
@@ -75,7 +75,7 @@ func lineno(pos token.Pos) string {
}
// Die with an error message.
-func fatalf(msg string, args ...interface{}) {
+func fatalf(msg string, args ...any) {
// If we've already printed other errors, they might have
// caused the fatal condition. Assume they're enough.
if nerrors == 0 {
@@ -86,7 +86,7 @@ func fatalf(msg string, args ...interface{}) {
var nerrors int
-func error_(pos token.Pos, msg string, args ...interface{}) {
+func error_(pos token.Pos, msg string, args ...any) {
nerrors++
if pos.IsValid() {
fmt.Fprintf(os.Stderr, "%s: ", fset.Position(pos).String())
diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md
index 1089348030d..02429d56886 100644
--- a/src/cmd/compile/README.md
+++ b/src/cmd/compile/README.md
@@ -289,9 +289,9 @@ dependencies, so is not suitable for distributed build systems.)
```
After that, your edit/compile/test cycle can be similar to:
```
- <... make edits to cmd/compile source ...>
+ [... make edits to cmd/compile source ...]
$ toolstash restore && go install cmd/compile # restore known good tools to build compiler
- <... 'go build', 'go test', etc. ...> # use freshly built compiler
+ [... 'go build', 'go test', etc. ...] # use freshly built compiler
```
* toolstash also allows comparing the installed vs. stashed copy of
diff --git a/src/cmd/compile/internal/abt/avlint32.go b/src/cmd/compile/internal/abt/avlint32.go
index ddfca346a2f..e41a6c0ca40 100644
--- a/src/cmd/compile/internal/abt/avlint32.go
+++ b/src/cmd/compile/internal/abt/avlint32.go
@@ -28,7 +28,7 @@ type T struct {
type node32 struct {
// Standard conventions hold for left = smaller, right = larger
left, right *node32
- data interface{}
+ data any
key int32
height_ int8
}
@@ -49,21 +49,21 @@ func (t *T) IsSingle() bool {
// VisitInOrder applies f to the key and data pairs in t,
// with keys ordered from smallest to largest.
-func (t *T) VisitInOrder(f func(int32, interface{})) {
+func (t *T) VisitInOrder(f func(int32, any)) {
if t.root == nil {
return
}
t.root.visitInOrder(f)
}
-func (n *node32) nilOrData() interface{} {
+func (n *node32) nilOrData() any {
if n == nil {
return nil
}
return n.data
}
-func (n *node32) nilOrKeyAndData() (k int32, d interface{}) {
+func (n *node32) nilOrKeyAndData() (k int32, d any) {
if n == nil {
k = NOT_KEY32
d = nil
@@ -83,7 +83,7 @@ func (n *node32) height() int8 {
// Find returns the data associated with x in the tree, or
// nil if x is not in the tree.
-func (t *T) Find(x int32) interface{} {
+func (t *T) Find(x int32) any {
return t.root.find(x).nilOrData()
}
@@ -92,7 +92,7 @@ func (t *T) Find(x int32) interface{} {
// x was already a key in the tree. The previous data associated
// with x is returned, and is nil if x was not previously a
// key in the tree.
-func (t *T) Insert(x int32, data interface{}) interface{} {
+func (t *T) Insert(x int32, data any) any {
if x == NOT_KEY32 {
panic("Cannot use sentinel value -0x80000000 as key")
}
@@ -105,7 +105,7 @@ func (t *T) Insert(x int32, data interface{}) interface{} {
} else {
newroot, n, o = n.aInsert(x)
}
- var r interface{}
+ var r any
if o != nil {
r = o.data
} else {
@@ -121,7 +121,7 @@ func (t *T) Copy() *T {
return &u
}
-func (t *T) Delete(x int32) interface{} {
+func (t *T) Delete(x int32) any {
n := t.root
if n == nil {
return nil
@@ -135,7 +135,7 @@ func (t *T) Delete(x int32) interface{} {
return d.data
}
-func (t *T) DeleteMin() (int32, interface{}) {
+func (t *T) DeleteMin() (int32, any) {
n := t.root
if n == nil {
return NOT_KEY32, nil
@@ -149,7 +149,7 @@ func (t *T) DeleteMin() (int32, interface{}) {
return d.key, d.data
}
-func (t *T) DeleteMax() (int32, interface{}) {
+func (t *T) DeleteMax() (int32, any) {
n := t.root
if n == nil {
return NOT_KEY32, nil
@@ -172,7 +172,7 @@ func (t *T) Size() int {
// not be symmetric. If f returns nil, then the key and data are not
// added to the result. If f itself is nil, then whatever value was
// already present in the smaller set is used.
-func (t *T) Intersection(u *T, f func(x, y interface{}) interface{}) *T {
+func (t *T) Intersection(u *T, f func(x, y any) any) *T {
if t.Size() == 0 || u.Size() == 0 {
return &T{}
}
@@ -227,7 +227,7 @@ func (t *T) Intersection(u *T, f func(x, y interface{}) interface{}) *T {
// is given by f(t's data, u's data) -- f need not be symmetric. If f returns nil,
// then the key and data are not added to the result. If f itself is nil, then
// whatever value was already present in the larger set is used.
-func (t *T) Union(u *T, f func(x, y interface{}) interface{}) *T {
+func (t *T) Union(u *T, f func(x, y any) any) *T {
if t.Size() == 0 {
return u
}
@@ -284,7 +284,7 @@ func (t *T) Union(u *T, f func(x, y interface{}) interface{}) *T {
// of f applied to data corresponding to equal keys. If f returns nil
// (or if f is nil) then the key+data are excluded, as usual. If f
// returns not-nil, then that key+data pair is inserted. instead.
-func (t *T) Difference(u *T, f func(x, y interface{}) interface{}) *T {
+func (t *T) Difference(u *T, f func(x, y any) any) *T {
if t.Size() == 0 {
return &T{}
}
@@ -365,7 +365,7 @@ func (t *node32) equals(u *node32) bool {
return it.done() == iu.done()
}
-func (t *T) Equiv(u *T, eqv func(x, y interface{}) bool) bool {
+func (t *T) Equiv(u *T, eqv func(x, y any) bool) bool {
if t == u {
return true
}
@@ -375,7 +375,7 @@ func (t *T) Equiv(u *T, eqv func(x, y interface{}) bool) bool {
return t.root.equiv(u.root, eqv)
}
-func (t *node32) equiv(u *node32, eqv func(x, y interface{}) bool) bool {
+func (t *node32) equiv(u *node32, eqv func(x, y any) bool) bool {
if t == u {
return true
}
@@ -404,7 +404,7 @@ type Iterator struct {
it iterator
}
-func (it *Iterator) Next() (int32, interface{}) {
+func (it *Iterator) Next() (int32, any) {
x := it.it.next()
if x == nil {
return NOT_KEY32, nil
@@ -461,37 +461,37 @@ func (it *iterator) next() *node32 {
// Min returns the minimum element of t.
// If t is empty, then (NOT_KEY32, nil) is returned.
-func (t *T) Min() (k int32, d interface{}) {
+func (t *T) Min() (k int32, d any) {
return t.root.min().nilOrKeyAndData()
}
// Max returns the maximum element of t.
// If t is empty, then (NOT_KEY32, nil) is returned.
-func (t *T) Max() (k int32, d interface{}) {
+func (t *T) Max() (k int32, d any) {
return t.root.max().nilOrKeyAndData()
}
// Glb returns the greatest-lower-bound-exclusive of x and the associated
// data. If x has no glb in the tree, then (NOT_KEY32, nil) is returned.
-func (t *T) Glb(x int32) (k int32, d interface{}) {
+func (t *T) Glb(x int32) (k int32, d any) {
return t.root.glb(x, false).nilOrKeyAndData()
}
// GlbEq returns the greatest-lower-bound-inclusive of x and the associated
// data. If x has no glbEQ in the tree, then (NOT_KEY32, nil) is returned.
-func (t *T) GlbEq(x int32) (k int32, d interface{}) {
+func (t *T) GlbEq(x int32) (k int32, d any) {
return t.root.glb(x, true).nilOrKeyAndData()
}
// Lub returns the least-upper-bound-exclusive of x and the associated
// data. If x has no lub in the tree, then (NOT_KEY32, nil) is returned.
-func (t *T) Lub(x int32) (k int32, d interface{}) {
+func (t *T) Lub(x int32) (k int32, d any) {
return t.root.lub(x, false).nilOrKeyAndData()
}
// LubEq returns the least-upper-bound-inclusive of x and the associated
// data. If x has no lubEq in the tree, then (NOT_KEY32, nil) is returned.
-func (t *T) LubEq(x int32) (k int32, d interface{}) {
+func (t *T) LubEq(x int32) (k int32, d any) {
return t.root.lub(x, true).nilOrKeyAndData()
}
@@ -499,7 +499,7 @@ func (t *node32) isLeaf() bool {
return t.left == nil && t.right == nil && t.height_ == LEAF_HEIGHT
}
-func (t *node32) visitInOrder(f func(int32, interface{})) {
+func (t *node32) visitInOrder(f func(int32, any)) {
if t.left != nil {
t.left.visitInOrder(f)
}
diff --git a/src/cmd/compile/internal/abt/avlint32_test.go b/src/cmd/compile/internal/abt/avlint32_test.go
index 7fa9ed4fd68..71962445f2b 100644
--- a/src/cmd/compile/internal/abt/avlint32_test.go
+++ b/src/cmd/compile/internal/abt/avlint32_test.go
@@ -317,7 +317,7 @@ func applicIterator(te *testing.T, x []int32) {
}
}
-func equiv(a, b interface{}) bool {
+func equiv(a, b any) bool {
sa, sb := a.(*sstring), b.(*sstring)
return *sa == *sb
}
@@ -450,16 +450,16 @@ func TestEquals(t *testing.T) {
[]int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
}
-func first(x, y interface{}) interface{} {
+func first(x, y any) any {
return x
}
-func second(x, y interface{}) interface{} {
+func second(x, y any) any {
return y
}
-func alwaysNil(x, y interface{}) interface{} {
+func alwaysNil(x, y any) any {
return nil
}
-func smaller(x, y interface{}) interface{} {
+func smaller(x, y any) any {
xi, _ := strconv.Atoi(fmt.Sprint(x))
yi, _ := strconv.Atoi(fmt.Sprint(y))
if xi < yi {
@@ -560,7 +560,7 @@ func (s *sstring) String() string {
return s.s
}
-func stringer(s string) interface{} {
+func stringer(s string) any {
return &sstring{s}
}
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index a3bfb491b8b..b31ffa474bc 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -245,6 +245,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpARMADDS,
+ ssa.OpARMADCS,
ssa.OpARMSUBS:
r := v.Reg0()
r1 := v.Args[0].Reg()
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 7bc0e536e94..43ecb6b4b71 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -1189,8 +1189,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
if dstReg == srcReg {
break
}
- tmpReg1 := int16(arm64.REG_R24)
- tmpReg2 := int16(arm64.REG_R25)
+ tmpReg1 := int16(arm64.REG_R25)
+ tmpFReg1 := int16(arm64.REG_F16)
+ tmpFReg2 := int16(arm64.REG_F17)
n := v.AuxInt
if n < 16 {
v.Fatalf("Move too small %d", n)
@@ -1198,10 +1199,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Generate copying instructions.
var off int64
+ for n >= 32 {
+ // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2)
+ // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg)
+ move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false)
+ off += 32
+ n -= 32
+ }
for n >= 16 {
- // LDP off(srcReg), (tmpReg1, tmpReg2)
- // STP (tmpReg1, tmpReg2), off(dstReg)
- move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false)
+ // FMOVQ off(src), tmpFReg1
+ // FMOVQ tmpFReg1, off(dst)
+ move16(s, srcReg, dstReg, tmpFReg1, off, false)
off += 16
n -= 16
}
@@ -1223,9 +1231,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
if dstReg == srcReg {
break
}
- countReg := int16(arm64.REG_R23)
- tmpReg1 := int16(arm64.REG_R24)
- tmpReg2 := int16(arm64.REG_R25)
+ countReg := int16(arm64.REG_R24)
+ tmpReg1 := int16(arm64.REG_R25)
+ tmpFReg1 := int16(arm64.REG_F16)
+ tmpFReg2 := int16(arm64.REG_F17)
n := v.AuxInt
loopSize := int64(64)
if n < 3*loopSize {
@@ -1251,10 +1260,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Move loopSize bytes starting at srcReg to dstReg.
// Increment srcReg and destReg by loopSize as a side effect.
- for range loopSize / 16 {
- // LDP.P 16(srcReg), (tmpReg1, tmpReg2)
- // STP.P (tmpReg1, tmpReg2), 16(dstReg)
- move16(s, srcReg, dstReg, tmpReg1, tmpReg2, 0, true)
+ for range loopSize / 32 {
+ // FLDPQ.P 32(srcReg), (tmpFReg1, tmpFReg2)
+ // FSTPQ.P (tmpFReg1, tmpFReg2), 32(dstReg)
+ move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, 0, true)
}
// Decrement loop count.
// SUB $1, countReg
@@ -1276,10 +1285,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Copy any fractional portion.
var off int64
+ for n >= 32 {
+ // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2)
+ // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg)
+ move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false)
+ off += 32
+ n -= 32
+ }
for n >= 16 {
- // LDP off(srcReg), (tmpReg1, tmpReg2)
- // STP (tmpReg1, tmpReg2), off(dstReg)
- move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false)
+ // FMOVQ off(src), tmpFReg1
+ // FMOVQ tmpFReg1, off(dst)
+ move16(s, srcReg, dstReg, tmpFReg1, off, false)
off += 16
n -= 16
}
@@ -1699,26 +1715,55 @@ func zero8(s *ssagen.State, reg int16, off int64) {
p.To.Offset = off
}
-// move16 copies 16 bytes at src+off to dst+off.
+// move32 copies 32 bytes at src+off to dst+off.
// Uses registers tmp1 and tmp2.
-// If postInc is true, increment src and dst by 16.
-func move16(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) {
- // LDP off(src), (tmp1, tmp2)
- ld := s.Prog(arm64.ALDP)
+// If postInc is true, increment src and dst by 32.
+func move32(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) {
+ // FLDPQ off(src), (tmp1, tmp2)
+ ld := s.Prog(arm64.AFLDPQ)
ld.From.Type = obj.TYPE_MEM
ld.From.Reg = src
ld.From.Offset = off
ld.To.Type = obj.TYPE_REGREG
ld.To.Reg = tmp1
ld.To.Offset = int64(tmp2)
- // STP (tmp1, tmp2), off(dst)
- st := s.Prog(arm64.ASTP)
+ // FSTPQ (tmp1, tmp2), off(dst)
+ st := s.Prog(arm64.AFSTPQ)
st.From.Type = obj.TYPE_REGREG
st.From.Reg = tmp1
st.From.Offset = int64(tmp2)
st.To.Type = obj.TYPE_MEM
st.To.Reg = dst
st.To.Offset = off
+ if postInc {
+ if off != 0 {
+ panic("can't postinc with non-zero offset")
+ }
+ ld.Scond = arm64.C_XPOST
+ st.Scond = arm64.C_XPOST
+ ld.From.Offset = 32
+ st.To.Offset = 32
+ }
+}
+
+// move16 copies 16 bytes at src+off to dst+off.
+// Uses register tmp1
+// If postInc is true, increment src and dst by 16.
+func move16(s *ssagen.State, src, dst, tmp1 int16, off int64, postInc bool) {
+ // FMOVQ off(src), tmp1
+ ld := s.Prog(arm64.AFMOVQ)
+ ld.From.Type = obj.TYPE_MEM
+ ld.From.Reg = src
+ ld.From.Offset = off
+ ld.To.Type = obj.TYPE_REG
+ ld.To.Reg = tmp1
+ // FMOVQ tmp1, off(dst)
+ st := s.Prog(arm64.AFMOVQ)
+ st.From.Type = obj.TYPE_REG
+ st.From.Reg = tmp1
+ st.To.Type = obj.TYPE_MEM
+ st.To.Reg = dst
+ st.To.Offset = off
if postInc {
if off != 0 {
panic("can't postinc with non-zero offset")
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
index 85873dcc40e..9e8ab2f488b 100644
--- a/src/cmd/compile/internal/base/debug.go
+++ b/src/cmd/compile/internal/base/debug.go
@@ -20,6 +20,7 @@ type DebugFlags struct {
Append int `help:"print information about append compilation"`
Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"`
Closure int `help:"print information about closure compilation"`
+ Converthash string `help:"hash value for use in debugging changes to platform-dependent float-to-[u]int conversion" concurrent:"ok"`
Defer int `help:"print information about defer compilation"`
DisableNil int `help:"disable nil checks" concurrent:"ok"`
DumpInlFuncProps string `help:"dump function properties from inl heuristics to specified file"`
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index a0ed876cfc8..1d211e0a2dd 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -262,6 +262,12 @@ func ParseFlags() {
Debug.LoopVar = 1
}
+ if Debug.Converthash != "" {
+ ConvertHash = NewHashDebug("converthash", Debug.Converthash, nil)
+ } else {
+ // quietly disable the convert hash changes
+ ConvertHash = NewHashDebug("converthash", "qn", nil)
+ }
if Debug.Fmahash != "" {
FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil)
}
diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go
index fa63deb46a3..edf567457cb 100644
--- a/src/cmd/compile/internal/base/hashdebug.go
+++ b/src/cmd/compile/internal/base/hashdebug.go
@@ -53,6 +53,7 @@ func (d *HashDebug) SetInlineSuffixOnly(b bool) *HashDebug {
// The default compiler-debugging HashDebug, for "-d=gossahash=..."
var hashDebug *HashDebug
+var ConvertHash *HashDebug // for debugging float-to-[u]int conversion changes
var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes
var LoopVarHash *HashDebug // for debugging shared/private loop variable changes
var PGOHash *HashDebug // for debugging PGO optimization decisions
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index 9e3348c1ecc..6bfc84cd62d 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -45,7 +45,7 @@ func SyntaxErrors() int {
}
// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
-func addErrorMsg(pos src.XPos, code errors.Code, format string, args ...interface{}) {
+func addErrorMsg(pos src.XPos, code errors.Code, format string, args ...any) {
msg := fmt.Sprintf(format, args...)
// Only add the position if know the position.
// See issue golang.org/issue/11361.
@@ -108,12 +108,12 @@ func sameline(a, b src.XPos) bool {
}
// Errorf reports a formatted error at the current line.
-func Errorf(format string, args ...interface{}) {
+func Errorf(format string, args ...any) {
ErrorfAt(Pos, 0, format, args...)
}
// ErrorfAt reports a formatted error message at pos.
-func ErrorfAt(pos src.XPos, code errors.Code, format string, args ...interface{}) {
+func ErrorfAt(pos src.XPos, code errors.Code, format string, args ...any) {
msg := fmt.Sprintf(format, args...)
if strings.HasPrefix(msg, "syntax error") {
@@ -164,7 +164,7 @@ func UpdateErrorDot(line string, name, expr string) {
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
-func Warn(format string, args ...interface{}) {
+func Warn(format string, args ...any) {
WarnfAt(Pos, format, args...)
}
@@ -172,7 +172,7 @@ func Warn(format string, args ...interface{}) {
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
-func WarnfAt(pos src.XPos, format string, args ...interface{}) {
+func WarnfAt(pos src.XPos, format string, args ...any) {
addErrorMsg(pos, 0, format, args...)
if Flag.LowerM != 0 {
FlushErrors()
@@ -191,7 +191,7 @@ func WarnfAt(pos src.XPos, format string, args ...interface{}) {
// prints a stack trace.
//
// If -h has been specified, Fatalf panics to force the usual runtime info dump.
-func Fatalf(format string, args ...interface{}) {
+func Fatalf(format string, args ...any) {
FatalfAt(Pos, format, args...)
}
@@ -209,7 +209,7 @@ var bugStack = counter.NewStack("compile/bug", 16) // 16 is arbitrary; used by g
// prints a stack trace.
//
// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
-func FatalfAt(pos src.XPos, format string, args ...interface{}) {
+func FatalfAt(pos src.XPos, format string, args ...any) {
FlushErrors()
bugStack.Inc()
@@ -244,14 +244,14 @@ func Assert(b bool) {
}
// Assertf reports a fatal error with Fatalf, unless b is true.
-func Assertf(b bool, format string, args ...interface{}) {
+func Assertf(b bool, format string, args ...any) {
if !b {
Fatalf(format, args...)
}
}
// AssertfAt reports a fatal error with FatalfAt, unless b is true.
-func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) {
+func AssertfAt(b bool, pos src.XPos, format string, args ...any) {
if !b {
FatalfAt(pos, format, args...)
}
diff --git a/src/cmd/compile/internal/base/timings.go b/src/cmd/compile/internal/base/timings.go
index f48ac93699b..cbcd4dc6f55 100644
--- a/src/cmd/compile/internal/base/timings.go
+++ b/src/cmd/compile/internal/base/timings.go
@@ -168,7 +168,7 @@ type lines [][]string
func (lines *lines) add(label string, n int, dt, tot time.Duration, events []*event) {
var line []string
- add := func(format string, args ...interface{}) {
+ add := func(format string, args ...any) {
line = append(line, fmt.Sprintf(format, args...))
}
diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go
index d3133dcd2dc..9214aa6cd05 100644
--- a/src/cmd/compile/internal/bitvec/bv.go
+++ b/src/cmd/compile/internal/bitvec/bv.go
@@ -93,7 +93,7 @@ func (bv BitVec) Unset(i int32) {
bv.B[i/wordBits] &^= mask
}
-// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
+// Next returns the smallest index >= i for which bvget(bv, i) == 1.
// If there is no such index, bvnext returns -1.
func (bv BitVec) Next(i int32) int32 {
if i >= bv.N {
diff --git a/src/cmd/compile/internal/coverage/cover.go b/src/cmd/compile/internal/coverage/cover.go
index 51f934f0600..5ecd5271f61 100644
--- a/src/cmd/compile/internal/coverage/cover.go
+++ b/src/cmd/compile/internal/coverage/cover.go
@@ -131,7 +131,7 @@ func metaHashAndLen() ([16]byte, int) {
}
var hv [16]byte
for i := 0; i < 16; i++ {
- nib := string(mhash[i*2 : i*2+2])
+ nib := mhash[i*2 : i*2+2]
x, err := strconv.ParseInt(nib, 16, 32)
if err != nil {
base.Fatalf("metahash bad byte %q", nib)
diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
index 372d0580940..cb4608a0246 100644
--- a/src/cmd/compile/internal/devirtualize/devirtualize.go
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -18,9 +18,11 @@ import (
"cmd/compile/internal/types"
)
+const go126ImprovedConcreteTypeAnalysis = true
+
// StaticCall devirtualizes the given call if possible when the concrete callee
// is available statically.
-func StaticCall(call *ir.CallExpr) {
+func StaticCall(s *State, call *ir.CallExpr) {
// For promoted methods (including value-receiver methods promoted
// to pointer-receivers), the interface method wrapper may contain
// expressions that can panic (e.g., ODEREF, ODOTPTR,
@@ -40,15 +42,31 @@ func StaticCall(call *ir.CallExpr) {
}
sel := call.Fun.(*ir.SelectorExpr)
- r := ir.StaticValue(sel.X)
- if r.Op() != ir.OCONVIFACE {
- return
- }
- recv := r.(*ir.ConvExpr)
+ var typ *types.Type
+ if go126ImprovedConcreteTypeAnalysis {
+ typ = concreteType(s, sel.X)
+ if typ == nil {
+ return
+ }
- typ := recv.X.Type()
- if typ.IsInterface() {
- return
+ // Don't create type-assertions that would be impossible at compile-time.
+ // This can happen in such case: any(0).(interface {A()}).A(), this typechecks without
+ // any errors, but will cause a runtime panic. We statically know that int(0) does not
+ // implement that interface, thus we skip the devirtualization, as it is not possible
+ // to make an assertion: any(0).(interface{A()}).(int) (int does not implement interface{A()}).
+ if !typecheck.Implements(typ, sel.X.Type()) {
+ return
+ }
+ } else {
+ r := ir.StaticValue(sel.X)
+ if r.Op() != ir.OCONVIFACE {
+ return
+ }
+ recv := r.(*ir.ConvExpr)
+ typ = recv.X.Type()
+ if typ.IsInterface() {
+ return
+ }
}
// If typ is a shape type, then it was a type argument originally
@@ -99,8 +117,27 @@ func StaticCall(call *ir.CallExpr) {
return
}
- dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
- dt.SetType(typ)
+ dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, typ)
+
+ if go126ImprovedConcreteTypeAnalysis {
+ // Consider:
+ //
+ // var v Iface
+ // v.A()
+ // v = &Impl{}
+ //
+ // Here in the devirtualizer, we determine the concrete type of v as being an *Impl,
+ // but it can still be a nil interface, we have not detected that. The v.(*Impl)
+ // type assertion that we make here would also have failed, but with a different
+ // panic "pkg.Iface is nil, not *pkg.Impl", where previously we would get a nil panic.
+ // We fix this, by introducing an additional nilcheck on the itab.
+ // Calling a method on a nil interface (in most cases) is a bug in a program, so it is fine
+ // to devirtualize and further (possibly) inline them, even though we would never reach
+ // the called function.
+ dt.UseNilPanic = true
+ dt.SetPos(call.Pos())
+ }
+
x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true)
switch x.Op() {
case ir.ODOTMETH:
@@ -138,3 +175,413 @@ func StaticCall(call *ir.CallExpr) {
// Desugar OCALLMETH, if we created one (#57309).
typecheck.FixMethodCall(call)
}
+
+const concreteTypeDebug = false
+
+// concreteType determines the concrete type of n, following OCONVIFACEs and type asserts.
+// Returns nil when the concrete type could not be determined, or when there are multiple
+// (different) types assigned to an interface.
+func concreteType(s *State, n ir.Node) (typ *types.Type) {
+ typ = concreteType1(s, n, make(map[*ir.Name]struct{}))
+ if typ == &noType {
+ return nil
+ }
+ if typ != nil && typ.IsInterface() {
+ base.FatalfAt(n.Pos(), "typ.IsInterface() = true; want = false; typ = %v", typ)
+ }
+ return typ
+}
+
+// noType is a sentinel value returned by [concreteType1].
+var noType types.Type
+
+// concreteType1 analyzes the node n and returns its concrete type if it is statically known.
+// Otherwise, it returns a nil Type, indicating that a concrete type was not determined.
+// When n is known to be statically nil or a self-assignment is detected, it returns a sentinel [noType] type instead.
+func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types.Type) {
+ nn := n // for debug messages
+
+ if concreteTypeDebug {
+ defer func() {
+ t := "&noType"
+ if outT != &noType {
+ t = outT.String()
+ }
+ base.Warn("concreteType1(%v) -> %v", nn, t)
+ }()
+ }
+
+ for {
+ if concreteTypeDebug {
+ base.Warn("concreteType1(%v): analyzing %v", nn, n)
+ }
+
+ if !n.Type().IsInterface() {
+ return n.Type()
+ }
+
+ switch n1 := n.(type) {
+ case *ir.ConvExpr:
+ if n1.Op() == ir.OCONVNOP {
+ if !n1.Type().IsInterface() || !types.Identical(n1.Type().Underlying(), n1.X.Type().Underlying()) {
+ // Since we check (directly before this switch) whether n is an interface, we should only reach
+ // here for iface conversions where both operands are the same.
+ base.FatalfAt(n1.Pos(), "not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type())
+ }
+ n = n1.X
+ continue
+ }
+ if n1.Op() == ir.OCONVIFACE {
+ n = n1.X
+ continue
+ }
+ case *ir.InlinedCallExpr:
+ if n1.Op() == ir.OINLCALL {
+ n = n1.SingleResult()
+ continue
+ }
+ case *ir.ParenExpr:
+ n = n1.X
+ continue
+ case *ir.TypeAssertExpr:
+ n = n1.X
+ continue
+ }
+ break
+ }
+
+ if n.Op() != ir.ONAME {
+ return nil
+ }
+
+ name := n.(*ir.Name).Canonical()
+ if name.Class != ir.PAUTO {
+ return nil
+ }
+
+ if name.Op() != ir.ONAME {
+ base.FatalfAt(name.Pos(), "name.Op = %v; want = ONAME", n.Op())
+ }
+
+ // name.Curfn must be set, since name.Class == ir.PAUTO (checked above).
+ if name.Curfn == nil {
+ base.FatalfAt(name.Pos(), "name.Curfn = nil; want not nil")
+ }
+
+ if name.Addrtaken() {
+ return nil // conservatively assume it's reassigned with a different type indirectly
+ }
+
+ if _, ok := seen[name]; ok {
+ return &noType // Already analyzed assignments to name, no need to do that twice.
+ }
+ seen[name] = struct{}{}
+
+ if concreteTypeDebug {
+ base.Warn("concreteType1(%v): analyzing assignments to %v", nn, name)
+ }
+
+ var typ *types.Type
+ for _, v := range s.assignments(name) {
+ var t *types.Type
+ switch v := v.(type) {
+ case *types.Type:
+ t = v
+ case ir.Node:
+ t = concreteType1(s, v, seen)
+ if t == &noType {
+ continue
+ }
+ }
+ if t == nil || (typ != nil && !types.Identical(typ, t)) {
+ return nil
+ }
+ typ = t
+ }
+
+ if typ == nil {
+ // Variable either declared with zero value, or only assigned with nil.
+ return &noType
+ }
+
+ return typ
+}
+
+// assignment can be one of:
+// - nil - assignment from an interface type.
+// - *types.Type - assignment from a concrete type (non-interface).
+// - ir.Node - assignment from a ir.Node.
+//
+// In most cases assignment should be an [ir.Node], but in cases where we
+// do not follow the data-flow, we return either a concrete type (*types.Type) or a nil.
+// For example in range over a slice, if the slice elem is of an interface type, then we return
+// a nil, otherwise the elem's concrete type (we do so because we do not analyze assignments to the
+// slice being ranged-over).
+type assignment any
+
+// State holds precomputed state for use in [StaticCall].
+type State struct {
+ // ifaceAssignments maps interface variables to all their assignments
+ // defined inside functions stored in the analyzedFuncs set.
+ // Note: it does not include direct assignments to nil.
+ ifaceAssignments map[*ir.Name][]assignment
+
+ // ifaceCallExprAssigns stores every [*ir.CallExpr], which has an interface
+ // result, that is assigned to a variable.
+ ifaceCallExprAssigns map[*ir.CallExpr][]ifaceAssignRef
+
+ // analyzedFuncs is a set of Funcs that were analyzed for iface assignments.
+ analyzedFuncs map[*ir.Func]struct{}
+}
+
+type ifaceAssignRef struct {
+ name *ir.Name // ifaceAssignments[name]
+ assignmentIndex int // ifaceAssignments[name][assignmentIndex]
+ returnIndex int // (*ir.CallExpr).Result(returnIndex)
+}
+
+// InlinedCall updates the [State] to take into account a newly inlined call.
+func (s *State) InlinedCall(fun *ir.Func, origCall *ir.CallExpr, inlinedCall *ir.InlinedCallExpr) {
+ if _, ok := s.analyzedFuncs[fun]; !ok {
+ // A full analysis has not yet been executed for the provided function, so we can skip it for now.
+ // When no devirtualization happens in a function, it is unnecessary to analyze it.
+ return
+ }
+
+ // Analyze assignments in the newly inlined function.
+ s.analyze(inlinedCall.Init())
+ s.analyze(inlinedCall.Body)
+
+ refs, ok := s.ifaceCallExprAssigns[origCall]
+ if !ok {
+ return
+ }
+ delete(s.ifaceCallExprAssigns, origCall)
+
+ // Update assignments to reference the new ReturnVars of the inlined call.
+ for _, ref := range refs {
+ vt := &s.ifaceAssignments[ref.name][ref.assignmentIndex]
+ if *vt != nil {
+ base.Fatalf("unexpected non-nil assignment")
+ }
+ if concreteTypeDebug {
+ base.Warn(
+ "InlinedCall(%v, %v): replacing interface node in (%v,%v) to %v (typ %v)",
+ origCall, inlinedCall, ref.name, ref.assignmentIndex,
+ inlinedCall.ReturnVars[ref.returnIndex],
+ inlinedCall.ReturnVars[ref.returnIndex].Type(),
+ )
+ }
+
+ // Update ifaceAssignments with an ir.Node from the inlined function’s ReturnVars.
+ // This may enable future devirtualization of calls that reference ref.name.
+ // We will get calls to [StaticCall] from the interleaved package,
+ // to try devirtualize such calls afterwards.
+ *vt = inlinedCall.ReturnVars[ref.returnIndex]
+ }
+}
+
+// assignments returns all assignments to n.
+func (s *State) assignments(n *ir.Name) []assignment {
+ fun := n.Curfn
+ if fun == nil {
+ base.FatalfAt(n.Pos(), "n.Curfn = ")
+ }
+ if n.Class != ir.PAUTO {
+ base.FatalfAt(n.Pos(), "n.Class = %v; want = PAUTO", n.Class)
+ }
+
+ if !n.Type().IsInterface() {
+ base.FatalfAt(n.Pos(), "name passed to assignments is not of an interface type: %v", n.Type())
+ }
+
+ // Analyze assignments in func, if not analyzed before.
+ if _, ok := s.analyzedFuncs[fun]; !ok {
+ if concreteTypeDebug {
+ base.Warn("assignments(): analyzing assignments in %v func", fun)
+ }
+ if s.analyzedFuncs == nil {
+ s.ifaceAssignments = make(map[*ir.Name][]assignment)
+ s.ifaceCallExprAssigns = make(map[*ir.CallExpr][]ifaceAssignRef)
+ s.analyzedFuncs = make(map[*ir.Func]struct{})
+ }
+ s.analyzedFuncs[fun] = struct{}{}
+ s.analyze(fun.Init())
+ s.analyze(fun.Body)
+ }
+
+ return s.ifaceAssignments[n]
+}
+
+// analyze analyzes every assignment to interface variables in nodes, updating [State].
+func (s *State) analyze(nodes ir.Nodes) {
+ assign := func(name ir.Node, assignment assignment) (*ir.Name, int) {
+ if name == nil || name.Op() != ir.ONAME || ir.IsBlank(name) {
+ return nil, -1
+ }
+
+ n, ok := ir.OuterValue(name).(*ir.Name)
+ if !ok || n.Curfn == nil {
+ return nil, -1
+ }
+
+ // Do not track variables that are not of interface types.
+ // For devirtualization they are unnecessary, we will not even look them up.
+ if !n.Type().IsInterface() {
+ return nil, -1
+ }
+
+ n = n.Canonical()
+ if n.Op() != ir.ONAME {
+ base.FatalfAt(n.Pos(), "n.Op = %v; want = ONAME", n.Op())
+ }
+ if n.Class != ir.PAUTO {
+ return nil, -1
+ }
+
+ switch a := assignment.(type) {
+ case nil:
+ case *types.Type:
+ if a != nil && a.IsInterface() {
+ assignment = nil // non-concrete type
+ }
+ case ir.Node:
+ // nil assignment, we can safely ignore them, see [StaticCall].
+ if ir.IsNil(a) {
+ return nil, -1
+ }
+ default:
+ base.Fatalf("unexpected type: %v", assignment)
+ }
+
+ if concreteTypeDebug {
+ base.Warn("analyze(): assignment found %v = %v", name, assignment)
+ }
+
+ s.ifaceAssignments[n] = append(s.ifaceAssignments[n], assignment)
+ return n, len(s.ifaceAssignments[n]) - 1
+ }
+
+ var do func(n ir.Node)
+ do = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ if rhs := n.Y; rhs != nil {
+ for {
+ if r, ok := rhs.(*ir.ParenExpr); ok {
+ rhs = r.X
+ continue
+ }
+ break
+ }
+ if call, ok := rhs.(*ir.CallExpr); ok && call.Fun != nil {
+ retTyp := call.Fun.Type().Results()[0].Type
+ n, idx := assign(n.X, retTyp)
+ if n != nil && retTyp.IsInterface() {
+ // We have a call expression, that returns an interface, store it for later evaluation.
+ // In case this func gets inlined later, we will update the assignment (added before)
+ // with a reference to ReturnVars, see [State.InlinedCall], which might allow for future devirtualizing of n.X.
+ s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, 0})
+ }
+ } else {
+ assign(n.X, rhs)
+ }
+ }
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ for i, p := range n.Lhs {
+ if n.Rhs[i] != nil {
+ assign(p, n.Rhs[i])
+ }
+ }
+ case ir.OAS2DOTTYPE:
+ n := n.(*ir.AssignListStmt)
+ if n.Rhs[0] == nil {
+ base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n)
+ }
+ assign(n.Lhs[0], n.Rhs[0])
+ assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize
+ case ir.OAS2MAPR, ir.OAS2RECV, ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ if n.Rhs[0] == nil {
+ base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n)
+ }
+ assign(n.Lhs[0], n.Rhs[0].Type())
+ assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ rhs := n.Rhs[0]
+ for {
+ if r, ok := rhs.(*ir.ParenExpr); ok {
+ rhs = r.X
+ continue
+ }
+ break
+ }
+ if call, ok := rhs.(*ir.CallExpr); ok {
+ for i, p := range n.Lhs {
+ retTyp := call.Fun.Type().Results()[i].Type
+ n, idx := assign(p, retTyp)
+ if n != nil && retTyp.IsInterface() {
+ // We have a call expression, that returns an interface, store it for later evaluation.
+ // In case this func gets inlined later, we will update the assignment (added before)
+ // with a reference to ReturnVars, see [State.InlinedCall], which might allow for future devirtualizing of n.X.
+ s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, i})
+ }
+ }
+ } else if call, ok := rhs.(*ir.InlinedCallExpr); ok {
+ for i, p := range n.Lhs {
+ assign(p, call.ReturnVars[i])
+ }
+ } else {
+ base.FatalfAt(n.Pos(), "unexpected type %T in OAS2FUNC Rhs[0]", call)
+ }
+ case ir.ORANGE:
+ n := n.(*ir.RangeStmt)
+ xTyp := n.X.Type()
+
+ // Range over an array pointer.
+ if xTyp.IsPtr() && xTyp.Elem().IsArray() {
+ xTyp = xTyp.Elem()
+ }
+
+ if xTyp.IsArray() || xTyp.IsSlice() {
+ assign(n.Key, nil) // integer does not have methods to devirtualize
+ assign(n.Value, xTyp.Elem())
+ } else if xTyp.IsChan() {
+ assign(n.Key, xTyp.Elem())
+ base.AssertfAt(n.Value == nil, n.Pos(), "n.Value != nil in range over chan")
+ } else if xTyp.IsMap() {
+ assign(n.Key, xTyp.Key())
+ assign(n.Value, xTyp.Elem())
+ } else if xTyp.IsInteger() || xTyp.IsString() {
+ // Range over int/string, results do not have methods, so nothing to devirtualize.
+ assign(n.Key, nil)
+ assign(n.Value, nil)
+ } else {
+ // We will not reach here in case of a range-over-func, as it is
+ // rewritten to function calls in the noder package.
+ base.FatalfAt(n.Pos(), "range over unexpected type %v", n.X.Type())
+ }
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+ for _, v := range n.Cases {
+ if v.Var == nil {
+ base.Assert(guard.Tag == nil)
+ continue
+ }
+ assign(v.Var, guard.X)
+ }
+ }
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ if _, ok := s.analyzedFuncs[n.Func]; !ok {
+ s.analyzedFuncs[n.Func] = struct{}{}
+ ir.Visit(n.Func, do)
+ }
+ }
+ }
+ ir.VisitList(nodes, do)
+}
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index b1ae55cdb6b..2ce5c8accc5 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -315,7 +315,7 @@ func CanInline(fn *ir.Func, profile *pgoir.Profile) {
// function is inlinable.
func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body))
+ fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), fn.Body)
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
}
diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go
index 954cc306fc8..c83bbdb718d 100644
--- a/src/cmd/compile/internal/inline/interleaved/interleaved.go
+++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go
@@ -45,6 +45,8 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
inlState := make(map[*ir.Func]*inlClosureState)
calleeUseCounts := make(map[*ir.Func]int)
+ var state devirtualize.State
+
// Pre-process all the functions, adding parentheses around call sites and starting their "inl state".
for _, fn := range typecheck.Target.Funcs {
bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn)
@@ -58,7 +60,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
// Do a first pass at counting call sites.
for i := range s.parens {
- s.resolve(i)
+ s.resolve(&state, i)
}
}
@@ -102,10 +104,11 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
for {
for i := l0; i < l1; i++ { // can't use "range parens" here
paren := s.parens[i]
- if new := s.edit(i); new != nil {
+ if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil {
// Update AST and recursively mark nodes.
- paren.X = new
- ir.EditChildren(new, s.mark) // mark may append to parens
+ paren.X = inlinedCall
+ ir.EditChildren(inlinedCall, s.mark) // mark may append to parens
+ state.InlinedCall(s.fn, origCall, inlinedCall)
done = false
}
}
@@ -114,7 +117,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
break
}
for i := l0; i < l1; i++ {
- s.resolve(i)
+ s.resolve(&state, i)
}
}
@@ -188,7 +191,7 @@ type inlClosureState struct {
// resolve attempts to resolve a call to a potentially inlineable callee
// and updates use counts on the callees. Returns the call site count
// for that callee.
-func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
+func (s *inlClosureState) resolve(state *devirtualize.State, i int) (*ir.Func, int) {
p := s.parens[i]
if i < len(s.resolved) {
if callee := s.resolved[i]; callee != nil {
@@ -200,7 +203,7 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
if !ok { // previously inlined
return nil, -1
}
- devirtualize.StaticCall(call)
+ devirtualize.StaticCall(state, call)
if callee := inline.InlineCallTarget(s.fn, call, s.profile); callee != nil {
for len(s.resolved) <= i {
s.resolved = append(s.resolved, nil)
@@ -213,23 +216,23 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
return nil, 0
}
-func (s *inlClosureState) edit(i int) ir.Node {
+func (s *inlClosureState) edit(state *devirtualize.State, i int) (*ir.CallExpr, *ir.InlinedCallExpr) {
n := s.parens[i].X
call, ok := n.(*ir.CallExpr)
if !ok {
- return nil
+ return nil, nil
}
// This is redundant with earlier calls to
// resolve, but because things can change it
// must be re-checked.
- callee, count := s.resolve(i)
+ callee, count := s.resolve(state, i)
if count <= 0 {
- return nil
+ return nil, nil
}
if inlCall := inline.TryInlineCall(s.fn, call, s.bigCaller, s.profile, count == 1 && callee.ClosureParent != nil); inlCall != nil {
- return inlCall
+ return call, inlCall
}
- return nil
+ return nil, nil
}
// Mark inserts parentheses, and is called repeatedly.
@@ -338,16 +341,18 @@ func (s *inlClosureState) unparenthesize() {
// returns.
func (s *inlClosureState) fixpoint() bool {
changed := false
+ var state devirtualize.State
ir.WithFunc(s.fn, func() {
done := false
for !done {
done = true
for i := 0; i < len(s.parens); i++ { // can't use "range parens" here
paren := s.parens[i]
- if new := s.edit(i); new != nil {
+ if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil {
// Update AST and recursively mark nodes.
- paren.X = new
- ir.EditChildren(new, s.mark) // mark may append to parens
+ paren.X = inlinedCall
+ ir.EditChildren(inlinedCall, s.mark) // mark may append to parens
+ state.InlinedCall(s.fn, origCall, inlinedCall)
done = false
changed = true
}
diff --git a/src/cmd/compile/internal/ir/bitset.go b/src/cmd/compile/internal/ir/bitset.go
index bae40058669..339e4e524f1 100644
--- a/src/cmd/compile/internal/ir/bitset.go
+++ b/src/cmd/compile/internal/ir/bitset.go
@@ -23,7 +23,7 @@ func (f *bitset8) set2(shift uint8, b uint8) {
// Clear old bits.
*(*uint8)(f) &^= 3 << shift
// Set new bits.
- *(*uint8)(f) |= uint8(b&3) << shift
+ *(*uint8)(f) |= (b & 3) << shift
}
type bitset16 uint16
diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go
index 4c218682ea6..3e5e6fbdcee 100644
--- a/src/cmd/compile/internal/ir/dump.go
+++ b/src/cmd/compile/internal/ir/dump.go
@@ -21,7 +21,7 @@ import (
)
// DumpAny is like FDumpAny but prints to stderr.
-func DumpAny(root interface{}, filter string, depth int) {
+func DumpAny(root any, filter string, depth int) {
FDumpAny(os.Stderr, root, filter, depth)
}
@@ -42,7 +42,7 @@ func DumpAny(root interface{}, filter string, depth int) {
// rather than their type; struct fields with zero values or
// non-matching field names are omitted, and "…" means recursion
// depth has been reached or struct fields have been omitted.
-func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
+func FDumpAny(w io.Writer, root any, filter string, depth int) {
if root == nil {
fmt.Fprintln(w, "nil")
return
@@ -110,7 +110,7 @@ func (p *dumper) Write(data []byte) (n int, err error) {
}
// printf is a convenience wrapper.
-func (p *dumper) printf(format string, args ...interface{}) {
+func (p *dumper) printf(format string, args ...any) {
if _, err := fmt.Fprintf(p, format, args...); err != nil {
panic(err)
}
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index d07e522d953..25654ca2536 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -617,7 +617,7 @@ func (o Op) IsSlice3() bool {
return false
}
-// A SliceHeader expression constructs a slice header from its parts.
+// A SliceHeaderExpr constructs a slice header from its parts.
type SliceHeaderExpr struct {
miniExpr
Ptr Node
@@ -665,7 +665,7 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr {
func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
-// A TypeAssertionExpr is a selector expression X.(Type).
+// A TypeAssertExpr is a selector expression X.(Type).
// Before type-checking, the type is Ntype.
type TypeAssertExpr struct {
miniExpr
@@ -677,6 +677,11 @@ type TypeAssertExpr struct {
// An internal/abi.TypeAssert descriptor to pass to the runtime.
Descriptor *obj.LSym
+
+ // When set to true, if this assert would panic, then use a nil pointer panic
+ // instead of an interface conversion panic.
+ // It must not be set for type asserts using the commaok form.
+ UseNilPanic bool
}
func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr {
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 668537c90e6..e027fe82908 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -90,7 +90,7 @@ type Func struct {
Marks []Mark
FieldTrack map[*obj.LSym]struct{}
- DebugInfo interface{}
+ DebugInfo any
LSym *obj.LSym // Linker object in this function's native ABI (Func.ABI)
Inl *Inline
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index 6f8d0a7fcc1..01f1c0c5022 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -43,8 +43,8 @@ type Name struct {
Func *Func // TODO(austin): nil for I.M
Offset_ int64
val constant.Value
- Opt interface{} // for use by escape analysis
- Embed *[]Embed // list of embedded files, for ONAME var
+ Opt any // for use by escape analysis
+ Embed *[]Embed // list of embedded files, for ONAME var
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
// For a closure var, the ONAME node of the original (outermost) captured variable.
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index 14b6b4f3cd4..b805155e6e3 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -16,9 +16,9 @@ func TestSizeof(t *testing.T) {
const _64bit = unsafe.Sizeof(uintptr(0)) == 8
var tests = []struct {
- val interface{} // type as a value
- _32bit uintptr // size on 32bit platforms
- _64bit uintptr // size on 64bit platforms
+ val any // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
}{
{Func{}, 184, 312},
{Name{}, 96, 160},
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index 0cfa2a2262f..344985f7be1 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -13,48 +13,51 @@ import (
var Syms symsStruct
type symsStruct struct {
- AssertE2I *obj.LSym
- AssertE2I2 *obj.LSym
- Asanread *obj.LSym
- Asanwrite *obj.LSym
- CgoCheckMemmove *obj.LSym
- CgoCheckPtrWrite *obj.LSym
- CheckPtrAlignment *obj.LSym
- Deferproc *obj.LSym
- Deferprocat *obj.LSym
- DeferprocStack *obj.LSym
- Deferreturn *obj.LSym
- Duffcopy *obj.LSym
- Duffzero *obj.LSym
- GCWriteBarrier [8]*obj.LSym
- Goschedguarded *obj.LSym
- Growslice *obj.LSym
- InterfaceSwitch *obj.LSym
- MallocGC *obj.LSym
- Memmove *obj.LSym
- Msanread *obj.LSym
- Msanwrite *obj.LSym
- Msanmove *obj.LSym
- Newobject *obj.LSym
- Newproc *obj.LSym
- PanicBounds *obj.LSym
- PanicExtend *obj.LSym
- Panicdivide *obj.LSym
- Panicshift *obj.LSym
- PanicdottypeE *obj.LSym
- PanicdottypeI *obj.LSym
- Panicnildottype *obj.LSym
- Panicoverflow *obj.LSym
- PanicSimdImm *obj.LSym
- Racefuncenter *obj.LSym
- Racefuncexit *obj.LSym
- Raceread *obj.LSym
- Racereadrange *obj.LSym
- Racewrite *obj.LSym
- Racewriterange *obj.LSym
- TypeAssert *obj.LSym
- WBZero *obj.LSym
- WBMove *obj.LSym
+ AssertE2I *obj.LSym
+ AssertE2I2 *obj.LSym
+ Asanread *obj.LSym
+ Asanwrite *obj.LSym
+ CgoCheckMemmove *obj.LSym
+ CgoCheckPtrWrite *obj.LSym
+ CheckPtrAlignment *obj.LSym
+ Deferproc *obj.LSym
+ Deferprocat *obj.LSym
+ DeferprocStack *obj.LSym
+ Deferreturn *obj.LSym
+ Duffcopy *obj.LSym
+ Duffzero *obj.LSym
+ GCWriteBarrier [8]*obj.LSym
+ Goschedguarded *obj.LSym
+ Growslice *obj.LSym
+ InterfaceSwitch *obj.LSym
+ MallocGC *obj.LSym
+ MallocGCSmallNoScan [27]*obj.LSym
+ MallocGCSmallScanNoHeader [27]*obj.LSym
+ MallocGCTiny [16]*obj.LSym
+ Memmove *obj.LSym
+ Msanread *obj.LSym
+ Msanwrite *obj.LSym
+ Msanmove *obj.LSym
+ Newobject *obj.LSym
+ Newproc *obj.LSym
+ PanicBounds *obj.LSym
+ PanicExtend *obj.LSym
+ Panicdivide *obj.LSym
+ Panicshift *obj.LSym
+ PanicdottypeE *obj.LSym
+ PanicdottypeI *obj.LSym
+ Panicnildottype *obj.LSym
+ Panicoverflow *obj.LSym
+ PanicSimdImm *obj.LSym
+ Racefuncenter *obj.LSym
+ Racefuncexit *obj.LSym
+ Raceread *obj.LSym
+ Racereadrange *obj.LSym
+ Racewrite *obj.LSym
+ Racewriterange *obj.LSym
+ TypeAssert *obj.LSym
+ WBZero *obj.LSym
+ WBMove *obj.LSym
// Wasm
SigPanic *obj.LSym
Staticuint64s *obj.LSym
diff --git a/src/cmd/compile/internal/logopt/log_opts.go b/src/cmd/compile/internal/logopt/log_opts.go
index d08f6fb5d6d..c47c9ee5afb 100644
--- a/src/cmd/compile/internal/logopt/log_opts.go
+++ b/src/cmd/compile/internal/logopt/log_opts.go
@@ -224,12 +224,12 @@ type Diagnostic struct {
// A LoggedOpt is what the compiler produces and accumulates,
// to be converted to JSON for human or IDE consumption.
type LoggedOpt struct {
- pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON.
- lastPos src.XPos // Usually the same as pos; current exception is for reporting entire range of transformed loops
- compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet)
- functionName string // Function name. For human/adhoc consumption; does not appear in JSON (yet)
- what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline"
- target []interface{} // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant.
+ pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON.
+ lastPos src.XPos // Usually the same as pos; current exception is for reporting entire range of transformed loops
+ compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet)
+ functionName string // Function name. For human/adhoc consumption; does not appear in JSON (yet)
+ what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline"
+ target []any // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant.
}
type logFormat uint8
@@ -325,7 +325,7 @@ var mu = sync.Mutex{} // mu protects loggedOpts.
// Pos is the source position (including inlining), what is the message, pass is which pass created the message,
// funcName is the name of the function
// A typical use for this to accumulate an explanation for a missed optimization, for example, why did something escape?
-func NewLoggedOpt(pos, lastPos src.XPos, what, pass, funcName string, args ...interface{}) *LoggedOpt {
+func NewLoggedOpt(pos, lastPos src.XPos, what, pass, funcName string, args ...any) *LoggedOpt {
pass = strings.ReplaceAll(pass, " ", "_")
return &LoggedOpt{pos, lastPos, pass, funcName, what, args}
}
@@ -333,7 +333,7 @@ func NewLoggedOpt(pos, lastPos src.XPos, what, pass, funcName string, args ...in
// LogOpt logs information about a (usually missed) optimization performed by the compiler.
// Pos is the source position (including inlining), what is the message, pass is which pass created the message,
// funcName is the name of the function.
-func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) {
+func LogOpt(pos src.XPos, what, pass, funcName string, args ...any) {
if Format == None {
return
}
@@ -346,7 +346,7 @@ func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) {
// LogOptRange is the same as LogOpt, but includes the ability to express a range of positions,
// not just a point.
-func LogOptRange(pos, lastPos src.XPos, what, pass, funcName string, args ...interface{}) {
+func LogOptRange(pos, lastPos src.XPos, what, pass, funcName string, args ...any) {
if Format == None {
return
}
diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go
index bd0d96a6954..84bbf9b394d 100644
--- a/src/cmd/compile/internal/loong64/ssa.go
+++ b/src/cmd/compile/internal/loong64/ssa.go
@@ -692,7 +692,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// vs
// 16 instructions in the straightline code
// Might as well use straightline code.
- v.Fatalf("ZeroLoop size too small %d", n)
+ v.Fatalf("MoveLoop size too small %d", n)
}
// Put iteration count in a register.
@@ -1175,8 +1175,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.AddRestSourceArgs([]obj.Addr{
- {Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 5) & 0x1fffffffff)},
- {Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 0) & 0x1f)},
+ {Type: obj.TYPE_CONST, Offset: (v.AuxInt >> 5) & 0x1fffffffff},
+ {Type: obj.TYPE_CONST, Offset: (v.AuxInt >> 0) & 0x1f},
})
case ssa.OpLOONG64ADDshiftLLV:
diff --git a/src/cmd/compile/internal/loopvar/loopvar.go b/src/cmd/compile/internal/loopvar/loopvar.go
index 5a4590d2998..267df2f905c 100644
--- a/src/cmd/compile/internal/loopvar/loopvar.go
+++ b/src/cmd/compile/internal/loopvar/loopvar.go
@@ -557,7 +557,7 @@ func LogTransformations(transformed []VarAndLoop) {
if logopt.Enabled() {
// For automated checking of coverage of this transformation, include this in the JSON information.
- var nString interface{} = n
+ var nString any = n
if inner != outer {
nString = fmt.Sprintf("%v (from inline)", n)
}
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 41eb2dce1cc..d7dd58d8caa 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -2961,6 +2961,7 @@ func (r *reader) multiExpr() []ir.Node {
as.Def = true
for i := range results {
tmp := r.temp(pos, r.typ())
+ tmp.Defn = as
as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp))
as.Lhs.Append(tmp)
@@ -3576,7 +3577,7 @@ func unifiedInlineCall(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlInd
edit(r.curfn)
})
- body := ir.Nodes(r.curfn.Body)
+ body := r.curfn.Body
// Reparent any declarations into the caller function.
for _, name := range r.curfn.Dcl {
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index 9c90d221c28..0b5aa007bf9 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -120,12 +120,12 @@ func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info, otherInfo ma
}
// errorf reports a user error about thing p.
-func (pw *pkgWriter) errorf(p poser, msg string, args ...interface{}) {
+func (pw *pkgWriter) errorf(p poser, msg string, args ...any) {
base.ErrorfAt(pw.m.pos(p), 0, msg, args...)
}
// fatalf reports an internal compiler error about thing p.
-func (pw *pkgWriter) fatalf(p poser, msg string, args ...interface{}) {
+func (pw *pkgWriter) fatalf(p poser, msg string, args ...any) {
base.FatalfAt(pw.m.pos(p), msg, args...)
}
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index ace3024480e..f0d228559f3 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -631,7 +631,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.Reg = v.Args[0].Reg()
- p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(sh)}
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh}
p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
// Auxint holds mask
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 38b9391c5f1..324007ea798 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -1282,7 +1282,6 @@ func dgcptrmask(t *types.Type, write bool) *obj.LSym {
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least types.PtrDataSize(t)/PtrSize bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
- clear(ptrmask)
if !t.HasPointers() {
return
}
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 61de983bb02..9aa77c3d02b 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -446,6 +446,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.OpRISCV64FMOVDconst, ssa.OpRISCV64FMOVFconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = v.AuxFloat()
+ p.From.Name = obj.NAME_NONE
+ p.From.Reg = obj.REG_NONE
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpRISCV64MOVaddr:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_ADDR
diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go
index 70e40312248..c4f88e78262 100644
--- a/src/cmd/compile/internal/s390x/ggen.go
+++ b/src/cmd/compile/internal/s390x/ggen.go
@@ -11,7 +11,7 @@ import (
"cmd/internal/obj/s390x"
)
-// clearLoopCutOff is the (somewhat arbitrary) value above which it is better
+// clearLoopCutoff is the (somewhat arbitrary) value above which it is better
// to have a loop of clear instructions (e.g. XCs) rather than just generating
// multiple instructions (i.e. loop unrolling).
// Must be between 256 and 4096.
diff --git a/src/cmd/compile/internal/ssa/_gen/386.rules b/src/cmd/compile/internal/ssa/_gen/386.rules
index 5f115024192..cbe56f7579e 100644
--- a/src/cmd/compile/internal/ssa/_gen/386.rules
+++ b/src/cmd/compile/internal/ssa/_gen/386.rules
@@ -7,6 +7,7 @@
(Add(32|64)F ...) => (ADDS(S|D) ...)
(Add32carry ...) => (ADDLcarry ...)
(Add32withcarry ...) => (ADCL ...)
+(Add32carrywithcarry ...) => (ADCLcarry ...)
(Sub(Ptr|32|16|8) ...) => (SUBL ...)
(Sub(32|64)F ...) => (SUBS(S|D) ...)
diff --git a/src/cmd/compile/internal/ssa/_gen/386Ops.go b/src/cmd/compile/internal/ssa/_gen/386Ops.go
index 60599a33abb..09bfc4226ff 100644
--- a/src/cmd/compile/internal/ssa/_gen/386Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/386Ops.go
@@ -90,22 +90,23 @@ func init() {
// Common regInfo
var (
- gp01 = regInfo{inputs: nil, outputs: gponly}
- gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
- gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
- gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
- gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
- gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
- gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
- gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly}
- gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
- gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
- gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
- gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
- gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx}
- gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
- gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax}
- gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
+ gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp2carry1carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+ gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax}
+ gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}
gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
gp1flags = regInfo{inputs: []regMask{gpsp}}
@@ -181,10 +182,11 @@ func init() {
{name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
{name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", typ: "UInt32", clobberFlags: true}, // arg0 + auxint
- {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates pair
- {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates pair
- {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags
- {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0+auxint+carry(arg1), where arg1 is flags
+ {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates pair
+ {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates pair
+ {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags
+ {name: "ADCLcarry", argLength: 3, reg: gp2carry1carry, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags, generates pair
+ {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0+auxint+carry(arg1), where arg1 is flags
{name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
{name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index 6191a7954af..38ca44f7eb0 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -162,10 +162,19 @@
(Cvt64to32F ...) => (CVTSQ2SS ...)
(Cvt64to64F ...) => (CVTSQ2SD ...)
-(Cvt32Fto32 ...) => (CVTTSS2SL ...)
-(Cvt32Fto64 ...) => (CVTTSS2SQ ...)
-(Cvt64Fto32 ...) => (CVTTSD2SL ...)
-(Cvt64Fto64 ...) => (CVTTSD2SQ ...)
+// Float, to int.
+// To make AMD64 "overflow" return max positive instead of max negative, compute
+// y and not x, smear the sign bit, and xor.
+(Cvt32Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x)))))
+(Cvt64Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x))))))
+
+(Cvt32Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) )))
+(Cvt64Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x)))))
+
+(Cvt32Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SL x)
+(Cvt32Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SQ x)
+(Cvt64Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SL x)
+(Cvt64Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SQ x)
(Cvt32Fto64F ...) => (CVTSS2SD ...)
(Cvt64Fto32F ...) => (CVTSD2SS ...)
@@ -388,20 +397,30 @@
(CondSelect x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
=> (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
-// If the condition does not set the flags, we need to generate a comparison.
-(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1
- => (CondSelect x y (MOVBQZX check))
-(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2
- => (CondSelect x y (MOVWQZX check))
-(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4
- => (CondSelect x y (MOVLQZX check))
-
(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
=> (CMOVQNE y x (CMPQconst [0] check))
(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
=> (CMOVLNE y x (CMPQconst [0] check))
(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
=> (CMOVWNE y x (CMPQconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && (is64BitInt(t) || isPtr(t))
+ => (CMOVQNE y x (CMPLconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && is32BitInt(t)
+ => (CMOVLNE y x (CMPLconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && is16BitInt(t)
+ => (CMOVWNE y x (CMPLconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && (is64BitInt(t) || isPtr(t))
+ => (CMOVQNE y x (CMPWconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && is32BitInt(t)
+ => (CMOVLNE y x (CMPWconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && is16BitInt(t)
+ => (CMOVWNE y x (CMPWconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && (is64BitInt(t) || isPtr(t))
+ => (CMOVQNE y x (CMPBconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && is32BitInt(t)
+ => (CMOVLNE y x (CMPBconst [0] check))
+(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && is16BitInt(t)
+ => (CMOVWNE y x (CMPBconst [0] check))
// Absorb InvertFlags
(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM.rules b/src/cmd/compile/internal/ssa/_gen/ARM.rules
index 18b5d6bba60..b63ca23de14 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM.rules
@@ -6,6 +6,7 @@
(Add(32|64)F ...) => (ADD(F|D) ...)
(Add32carry ...) => (ADDS ...)
(Add32withcarry ...) => (ADC ...)
+(Add32carrywithcarry ...) => (ADCS ...)
(Sub(Ptr|32|16|8) ...) => (SUB ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
index 43072ae9130..cc3758d1095 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
@@ -144,8 +144,9 @@ func init() {
gpspsbg = gpspg | buildReg("SB")
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r25 = buildReg("R25")
r24to25 = buildReg("R24 R25")
- r23to25 = buildReg("R23 R24 R25")
+ f16to17 = buildReg("F16 F17")
rz = buildReg("ZERO")
first16 = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15")
)
@@ -599,8 +600,8 @@ func init() {
aux: "Int64",
argLength: 3,
reg: regInfo{
- inputs: []regMask{gp &^ r24to25, gp &^ r24to25},
- clobbers: r24to25, // TODO: figure out needIntTemp x2
+ inputs: []regMask{gp &^ r25, gp &^ r25},
+ clobbers: r25 | f16to17, // TODO: figure out needIntTemp + x2 for floats
},
faultOnNilArg0: true,
faultOnNilArg1: true,
@@ -617,8 +618,8 @@ func init() {
aux: "Int64",
argLength: 3,
reg: regInfo{
- inputs: []regMask{gp &^ r23to25, gp &^ r23to25},
- clobbers: r23to25, // TODO: figure out needIntTemp x3
+ inputs: []regMask{gp &^ r24to25, gp &^ r24to25},
+ clobbers: r24to25 | f16to17, // TODO: figure out needIntTemp x2 + x2 for floats
clobbersArg0: true,
clobbersArg1: true,
},
diff --git a/src/cmd/compile/internal/ssa/_gen/ARMOps.go b/src/cmd/compile/internal/ssa/_gen/ARMOps.go
index 01cd48835e2..59bb71b2e3c 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARMOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARMOps.go
@@ -102,36 +102,37 @@ func init() {
)
// Common regInfo
var (
- gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
- gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
- gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
- gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
- gp1flags = regInfo{inputs: []regMask{gpg}}
- gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
- gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
- gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, 0}}
- gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
- gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
- gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
- gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
- gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp, 0}}
- gp3flags = regInfo{inputs: []regMask{gp, gp, gp}}
- gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
- gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
- gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
- gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
- gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
- fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
- fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
- fp1flags = regInfo{inputs: []regMask{fp}}
- fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp
- gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")}
- fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
- fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
- fp2flags = regInfo{inputs: []regMask{fp, fp}}
- fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
- fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
- readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, 0}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp2flags1carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp, 0}}
+ gp3flags = regInfo{inputs: []regMask{gp, gp, gp}}
+ gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
)
ops := []opData{
// binary ops
@@ -161,16 +162,17 @@ func init() {
call: false, // TODO(mdempsky): Should this be true?
},
- {name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
- {name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
- {name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags
- {name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags
- {name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag
- {name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag
- {name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag
- {name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags
- {name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags
- {name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags
+ {name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
+ {name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
+ {name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags
+ {name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags
+ {name: "ADCS", argLength: 3, reg: gp2flags1carry, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, sets carry
+ {name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag
+ {name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag
+ {name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag
+ {name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags
+ {name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags
+ {name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags
{name: "MULLU", argLength: 2, reg: gp22, asm: "MULLU", commutative: true}, // arg0 * arg1, high 32 bits in out0, low 32 bits in out1
{name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
index 287eedee374..9691296043a 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -611,15 +611,24 @@
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
// register indexed load
-(MOVVload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr idx mem)
-(MOVWUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
-(MOVWload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
-(MOVHUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
-(MOVHload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
-(MOVBUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
-(MOVBload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
-(MOVFload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr idx mem)
-(MOVDload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
+(MOVVload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr idx mem)
+(MOVVload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr (SLLVconst [shift] idx) mem)
+(MOVWUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
+(MOVWUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr (SLLVconst [shift] idx) mem)
+(MOVWload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
+(MOVWload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr (SLLVconst [shift] idx) mem)
+(MOVHUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
+(MOVHUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr (SLLVconst [shift] idx) mem)
+(MOVHload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
+(MOVHload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr (SLLVconst [shift] idx) mem)
+(MOVBUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
+(MOVBUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr (SLLVconst [shift] idx) mem)
+(MOVBload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
+(MOVBload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr (SLLVconst [shift] idx) mem)
+(MOVFload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr idx mem)
+(MOVFload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr (SLLVconst [shift] idx) mem)
+(MOVDload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
+(MOVDload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr (SLLVconst [shift] idx) mem)
(MOVVloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVVloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVWUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
@@ -640,12 +649,18 @@
(MOVDloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
// register indexed store
-(MOVVstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr idx val mem)
-(MOVWstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
-(MOVHstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
-(MOVBstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
-(MOVFstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr idx val mem)
-(MOVDstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
+(MOVVstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr idx val mem)
+(MOVVstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVWstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVWstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVHstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
+(MOVHstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVBstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
+(MOVBstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVFstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr idx val mem)
+(MOVFstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr (SLLVconst [shift] idx) val mem)
+(MOVDstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
+(MOVDstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr (SLLVconst [shift] idx) val mem)
(MOVVstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVVstore [int32(c)] ptr val mem)
(MOVVstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVVstore [int32(c)] idx val mem)
(MOVWstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index a85a566660e..7e8b8bf497b 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -143,6 +143,7 @@ func init() {
gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg | rz}}
+ gpoldatom = regInfo{inputs: []regMask{gpspsbg, gpg}}
gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
preldreg = regInfo{inputs: []regMask{gpspg}}
@@ -431,6 +432,12 @@ func init() {
faultOnNilArg1: true,
},
+ // Atomic operations.
+ //
+ // resultNotInArgs is needed by all ops lowering to LoongArch
+ // atomic memory access instructions, because these instructions
+ // are defined to require rd != rj && rd != rk per the ISA spec.
+
// atomic loads.
// load from arg0. arg1=mem.
// returns so they can be properly ordered with other loads.
@@ -500,8 +507,8 @@ func init() {
// Atomic 32 bit AND/OR.
// *arg0 &= (|=) arg1. arg2=mem. returns nil.
- {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
- {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpoldatom, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpoldatom, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
// Atomic 32,64 bit AND/OR.
// *arg0 &= (|=) arg1. arg2=mem. returns . auxint must be zero.
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS.rules b/src/cmd/compile/internal/ssa/_gen/MIPS.rules
index 80bf9017f52..fe1e00a4e4c 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS.rules
@@ -9,6 +9,12 @@
(Select1 (Add32carry x y)) => (SGTU x (ADD x y))
(Add32withcarry x y c) => (ADD c (ADD x y))
+(Select0 (Add32carrywithcarry x y c)) => (ADD c (ADD x y))
+(Select1 (Add32carrywithcarry x y c)) =>
+ (OR
+ (SGTU x xy:(ADD x y))
+ (SGTU xy (ADD c xy)))
+
(Sub(Ptr|32|16|8) ...) => (SUB ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index f5e381ac413..b5e8d81da2d 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -18,7 +18,10 @@
(Max(32|64)F x y) && buildcfg.GOPPC64 >= 9 => (XSMAXJDP x y)
// Combine 64 bit integer multiply and adds
-(ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
+(ADD z l:(MULLD x y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
+(ADD z l:(MULLDconst [x] y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD (MOVDconst [int64(x)]) y z)
+(ADDconst [z] l:(MULLD x y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y (MOVDconst [int64(z)]))
+(ADDconst [z] l:(MULLDconst [x] y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD (MOVDconst [int64(x)]) y (MOVDconst [int64(z)]))
(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
index 7059273eb2f..646948f2df2 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -467,8 +467,7 @@
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
-(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
-(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
+(Const(64|32)F ...) => (FMOV(D|F)const ...)
(ConstNil) => (MOVDconst [0])
(ConstBool [val]) => (MOVDconst [int64(b2i(val))])
@@ -824,16 +823,28 @@
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
// Test for -∞ (bit 0) using 64 bit classify instruction.
-(FLTD x (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))]))) => (ANDI [1] (FCLASSD x))
-(FLED (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])) x) => (SNEZ (ANDI [0xff &^ 1] (FCLASSD x)))
-(FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) => (ANDI [1] (FCLASSD x))
-(FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) => (SEQZ (ANDI [1] (FCLASSD x)))
+(FLTD x (FMOVDconst [-math.MaxFloat64])) => (ANDI [0b00_0000_0001] (FCLASSD x))
+(FLED (FMOVDconst [-math.MaxFloat64]) x) => (SNEZ (ANDI [0b00_1111_1110] (FCLASSD x)))
+(FEQD x (FMOVDconst [math.Inf(-1)])) => (ANDI [0b00_0000_0001] (FCLASSD x))
+(FNED x (FMOVDconst [math.Inf(-1)])) => (SEQZ (ANDI [0b00_0000_0001] (FCLASSD x)))
// Test for +∞ (bit 7) using 64 bit classify instruction.
-(FLTD (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])) x) => (SNEZ (ANDI [1<<7] (FCLASSD x)))
-(FLED x (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))]))) => (SNEZ (ANDI [0xff &^ (1<<7)] (FCLASSD x)))
-(FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SNEZ (ANDI [1<<7] (FCLASSD x)))
-(FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SEQZ (ANDI [1<<7] (FCLASSD x)))
+(FLTD (FMOVDconst [math.MaxFloat64]) x) => (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x)))
+(FLED x (FMOVDconst [math.MaxFloat64])) => (SNEZ (ANDI [0b00_0111_1111] (FCLASSD x)))
+(FEQD x (FMOVDconst [math.Inf(1)])) => (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x)))
+(FNED x (FMOVDconst [math.Inf(1)])) => (SEQZ (ANDI [0b00_1000_0000] (FCLASSD x)))
+
+// Test for subnormal numbers using 64 bit classify instruction.
+(FLTD x (FMOVDconst [+0x1p-1022])) => (SNEZ (ANDI [0b00_0011_1111] (FCLASSD x)))
+(FLED (FMOVDconst [+0x1p-1022]) x) => (SNEZ (ANDI [0b00_1100_0000] (FCLASSD x)))
+(FLED x (FMOVDconst [-0x1p-1022])) => (SNEZ (ANDI [0b00_0000_0011] (FCLASSD x)))
+(FLTD (FMOVDconst [-0x1p-1022]) x) => (SNEZ (ANDI [0b00_1111_1100] (FCLASSD x)))
+
+// Absorb unary sign bit operations into 64 bit classify instruction.
+(S(EQ|NE)Z (ANDI [c] (FCLASSD (FNEGD x)))) => (S(EQ|NE)Z (ANDI [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x)))
+(S(EQ|NE)Z (ANDI [c] (FCLASSD (FABSD x)))) => (S(EQ|NE)Z (ANDI [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x)))
+(B(EQ|NE)Z (ANDI [c] (FCLASSD (FNEGD x))) yes no) => (B(EQ|NE)Z (ANDI [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x)) yes no)
+(B(EQ|NE)Z (ANDI [c] (FCLASSD (FABSD x))) yes no) => (B(EQ|NE)Z (ANDI [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x)) yes no)
//
// Optimisations for rva22u64 and above.
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
index dc433ff9749..a0e1ab9754d 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
@@ -99,7 +99,7 @@ func init() {
}
}
- // Floating pointer registers.
+ // Floating point registers.
for r := 32; r <= 63; r++ {
mask := addreg(r, "")
fpMask |= mask
@@ -132,6 +132,7 @@ func init() {
gpcas = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}}
gpatomic = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}}
+ fp01 = regInfo{outputs: []regMask{fpMask}}
fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
fp31 = regInfo{inputs: []regMask{fpMask, fpMask, fpMask}, outputs: []regMask{fpMask}}
@@ -176,7 +177,9 @@ func init() {
{name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
- {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+ {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+ {name: "FMOVDconst", reg: fp01, asm: "MOVD", typ: "Float64", aux: "Float64", rematerializeable: true}, // auxint
+ {name: "FMOVFconst", reg: fp01, asm: "MOVF", typ: "Float32", aux: "Float32", rematerializeable: true}, // auxint
// Loads: load bits from arg0+auxint+aux and extend to 64 bits; arg1=mem
{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, sign extend
diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
index f632a01109f..60281522539 100644
--- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+(Last ___) => v.Args[len(v.Args)-1]
+
// Lowering arithmetic
(Add(64|32|16|8|Ptr) ...) => (I64Add ...)
(Add(64|32)F ...) => (F(64|32)Add ...)
@@ -44,6 +46,37 @@
(Not ...) => (I64Eqz ...)
+(Avg64u x y) => (I64Add (I64ShrU (I64Sub x y) (I64Const [1])) y)
+
+// High word of multiply without carry bits; see Hacker's Delight, 2nd. ed, Figure 8-2, p. 174.
+(Hmul64 x y) =>
+ (Last
+ x0: (ZeroExt32to64 x)
+ x1: (I64ShrS x (I64Const [32]))
+ y0: (ZeroExt32to64 y)
+ y1: (I64ShrS y (I64Const [32]))
+ x0y0: (I64Mul x0 y0)
+ tt: (I64Add (I64Mul x1 y0) (I64ShrU x0y0 (I64Const [32])))
+ w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt))
+ w2: (I64ShrS tt (I64Const [32]))
+ (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrS w1 (I64Const [32]))))
+
+// Same as Hmul64 but signed shifts now unsigned.
+(Hmul64u x y) =>
+ (Last
+ x0: (ZeroExt32to64 x)
+ x1: (I64ShrU x (I64Const [32]))
+ y0: (ZeroExt32to64 y)
+ y1: (I64ShrU y (I64Const [32]))
+ w0: (I64Mul x0 y0)
+ tt: (I64Add (I64Mul x1 y0) (I64ShrU w0 (I64Const [32])))
+ w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt))
+ w2: (I64ShrU tt (I64Const [32]))
+ hi: (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrU w1 (I64Const [32]))))
+
+(Select0 (Mul64uhilo x y)) => (Hmul64u x y)
+(Select1 (Mul64uhilo x y)) => (I64Mul x y)
+
// Lowering pointer arithmetic
(OffPtr ...) => (I64AddConst ...)
diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go
index 38acc5133ab..246fe98a21d 100644
--- a/src/cmd/compile/internal/ssa/_gen/allocators.go
+++ b/src/cmd/compile/internal/ssa/_gen/allocators.go
@@ -122,6 +122,11 @@ func genAllocators() {
typ: "[]ID",
base: "LimitSlice",
},
+ {
+ name: "UintSlice",
+ typ: "[]uint",
+ base: "LimitSlice",
+ },
}
w := new(bytes.Buffer)
diff --git a/src/cmd/compile/internal/ssa/_gen/dec.rules b/src/cmd/compile/internal/ssa/_gen/dec.rules
index 5309a7f6b49..9f6dc369759 100644
--- a/src/cmd/compile/internal/ssa/_gen/dec.rules
+++ b/src/cmd/compile/internal/ssa/_gen/dec.rules
@@ -4,7 +4,7 @@
// This file contains rules to decompose builtin compound types
// (complex,string,slice,interface) into their constituent
-// types. These rules work together with the decomposeBuiltIn
+// types. These rules work together with the decomposeBuiltin
// pass which handles phis of these types.
(Store {t} _ _ mem) && t.Size() == 0 => mem
diff --git a/src/cmd/compile/internal/ssa/_gen/dec64.rules b/src/cmd/compile/internal/ssa/_gen/dec64.rules
index ba776af1a70..483818906e6 100644
--- a/src/cmd/compile/internal/ssa/_gen/dec64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/dec64.rules
@@ -3,11 +3,15 @@
// license that can be found in the LICENSE file.
// This file contains rules to decompose [u]int64 types on 32-bit
-// architectures. These rules work together with the decomposeBuiltIn
+// architectures. These rules work together with the decomposeBuiltin
// pass which handles phis of these typ.
+(Last ___) => v.Args[len(v.Args)-1]
+
(Int64Hi (Int64Make hi _)) => hi
(Int64Lo (Int64Make _ lo)) => lo
+(Select0 (MakeTuple x y)) => x
+(Select1 (MakeTuple x y)) => y
(Load ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() =>
(Int64Make
@@ -60,30 +64,85 @@
(Arg {n} [off])
(Arg {n} [off+4]))
-(Add64 x y) =>
- (Int64Make
- (Add32withcarry
- (Int64Hi x)
- (Int64Hi y)
- (Select1 (Add32carry (Int64Lo x) (Int64Lo y))))
- (Select0 (Add32carry (Int64Lo x) (Int64Lo y))))
+(Add64 x y) =>
+ (Last
+ x0: (Int64Lo x)
+ x1: (Int64Hi x)
+ y0: (Int64Lo y)
+ y1: (Int64Hi y)
+ add: (Add32carry x0 y0)
+ (Int64Make
+ (Add32withcarry x1 y1 (Select1 add))
+ (Select0 add)))
-(Sub64 x y) =>
- (Int64Make
- (Sub32withcarry
- (Int64Hi x)
- (Int64Hi y)
- (Select1 (Sub32carry (Int64Lo x) (Int64Lo y))))
- (Select0 (Sub32carry (Int64Lo x) (Int64Lo y))))
+(Sub64 x y) =>
+ (Last
+ x0: (Int64Lo x)
+ x1: (Int64Hi x)
+ y0: (Int64Lo y)
+ y1: (Int64Hi y)
+ sub: (Sub32carry x0 y0)
+ (Int64Make
+ (Sub32withcarry x1 y1 (Select1 sub))
+ (Select0 sub)))
+
+(Mul64 x y) =>
+ (Last
+ x0: (Int64Lo x)
+ x1: (Int64Hi x)
+ y0: (Int64Lo y)
+ y1: (Int64Hi y)
+ x0y0: (Mul32uhilo x0 y0)
+ x0y0Hi: (Select0 x0y0)
+ x0y0Lo: (Select1 x0y0)
+ (Int64Make
+ (Add32 x0y0Hi
+ (Add32
+ (Mul32 x0 y1)
+ (Mul32 x1 y0)))
+ x0y0Lo))
+
+(Mul64uhilo x y) =>
+ (Last
+ x0: (Int64Lo x)
+ x1: (Int64Hi x)
+ y0: (Int64Lo y)
+ y1: (Int64Hi y)
+ x0y0: (Mul32uhilo x0 y0)
+ x0y1: (Mul32uhilo x0 y1)
+ x1y0: (Mul32uhilo x1 y0)
+ x1y1: (Mul32uhilo x1 y1)
+ x0y0Hi: (Select0 x0y0)
+ x0y0Lo: (Select1 x0y0)
+ x0y1Hi: (Select0 x0y1)
+ x0y1Lo: (Select1 x0y1)
+ x1y0Hi: (Select0 x1y0)
+ x1y0Lo: (Select1 x1y0)
+ x1y1Hi: (Select0 x1y1)
+ x1y1Lo: (Select1 x1y1)
+ w1a: (Add32carry x0y0Hi x0y1Lo)
+ w2a: (Add32carrywithcarry x0y1Hi x1y0Hi (Select1 w1a))
+ w3a: (Add32withcarry x1y1Hi (Const32 [0]) (Select1 w2a))
+ w1b: (Add32carry x1y0Lo (Select0 w1a))
+ w2b: (Add32carrywithcarry x1y1Lo (Select0 w2a) (Select1 w1b))
+ w3b: (Add32withcarry w3a (Const32 [0]) (Select1 w2b))
+ (MakeTuple
+ (Int64Make w3b (Select0 w2b))
+ (Int64Make (Select0 w1b) x0y0Lo)))
+
+(Hmul64u x y) => (Select0 (Mul64uhilo x y))
+
+// Hacker's Delight p. 175: signed hmul = unsigned hmul - (x<0)&y - (y<0)&x.
+(Hmul64 x y) =>
+ (Last
+ p: (Hmul64u x y)
+ xSign: (Int64Make xs:(Rsh32x32 (Int64Hi x) (Const32 [31])) xs)
+ ySign: (Int64Make ys:(Rsh32x32 (Int64Hi y) (Const32 [31])) ys)
+ (Sub64 (Sub64 p (And64 xSign y)) (And64 ySign x)))
+
+// (x+y)/2 => (x-y)/2 + y
+(Avg64u x y) => (Add64 (Rsh64Ux32 (Sub64 x y) (Const32 [1])) y)
-(Mul64 x y) =>
- (Int64Make
- (Add32
- (Mul32 (Int64Lo x) (Int64Hi y))
- (Add32
- (Mul32 (Int64Hi x) (Int64Lo y))
- (Select0 (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
- (Select1 (Mul32uhilo (Int64Lo x) (Int64Lo y))))
(And64 x y) =>
(Int64Make
@@ -217,11 +276,32 @@
(Rsh8x64 x y) => (Rsh8x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y)))
+
(RotateLeft64 x (Int64Make hi lo)) => (RotateLeft64 x lo)
(RotateLeft32 x (Int64Make hi lo)) => (RotateLeft32 x lo)
(RotateLeft16 x (Int64Make hi lo)) => (RotateLeft16 x lo)
(RotateLeft8 x (Int64Make hi lo)) => (RotateLeft8 x lo)
+// RotateLeft64 by constant, for use in divmod.
+(RotateLeft64 x (Const(64|32|16|8) [c])) && c&63 == 0 => x
+(RotateLeft64 x (Const(64|32|16|8) [c])) && c&63 == 32 => (Int64Make (Int64Lo x) (Int64Hi x))
+(RotateLeft64 x (Const(64|32|16|8) [c])) && 0 < c&63 && c&63 < 32 =>
+ (Int64Make
+ (Or32
+ (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)]))
+ (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)])))
+ (Or32
+ (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)]))
+ (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)]))))
+(RotateLeft64 x (Const(64|32|16|8) [c])) && 32 < c&63 && c&63 < 64 =>
+ (Int64Make
+ (Or32
+ (Lsh32x32 (Int64Lo x) (Const32 [int32(c&31)]))
+ (Rsh32Ux32 (Int64Hi x) (Const32 [int32(32-c&31)])))
+ (Or32
+ (Lsh32x32 (Int64Hi x) (Const32 [int32(c&31)]))
+ (Rsh32Ux32 (Int64Lo x) (Const32 [int32(32-c&31)]))))
+
// Clean up constants a little
(Or32 (Zeromask (Const32 [c])) y) && c == 0 => y
(Or32 (Zeromask (Const32 [c])) y) && c != 0 => (Const32 [-1])
diff --git a/src/cmd/compile/internal/ssa/_gen/divisible.rules b/src/cmd/compile/internal/ssa/_gen/divisible.rules
new file mode 100644
index 00000000000..8c198838267
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/divisible.rules
@@ -0,0 +1,167 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Divisibility checks (x%c == 0 or x%c != 0) convert to multiply, rotate, compare.
+// The opt pass rewrote x%c to x-(x/c)*c
+// and then also rewrote x-(x/c)*c == 0 to x == (x/c)*c.
+// If x/c is being used for a division already (div.Uses != 1)
+// then we leave the expression alone.
+//
+// See ../magic.go for a detailed description of these algorithms.
+// See test/codegen/divmod.go for tests.
+// See divmod.rules for other division rules that run after these.
+
+// Divisibility by unsigned or signed power of two.
+(Eq(8|16|32|64) x (Mul(8|16|32|64) (Div(8|16|32|64)u x (Const(8|16|32|64) [c])) (Const(8|16|32|64) [c])))
+ && x.Op != OpConst64 && isPowerOfTwo(c) =>
+ (Eq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [c-1])) (Const(8|16|32|64) [0]))
+(Eq(8|16|32|64) x (Mul(8|16|32|64) (Div(8|16|32|64) x (Const(8|16|32|64) [c])) (Const(8|16|32|64) [c])))
+ && x.Op != OpConst64 && isPowerOfTwo(c) =>
+ (Eq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [c-1])) (Const(8|16|32|64) [0]))
+(Neq(8|16|32|64) x (Mul(8|16|32|64) (Div(8|16|32|64)u x (Const(8|16|32|64) [c])) (Const(8|16|32|64) [c])))
+ && x.Op != OpConst64 && isPowerOfTwo(c) =>
+ (Neq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [c-1])) (Const(8|16|32|64) [0]))
+(Neq(8|16|32|64) x (Mul(8|16|32|64) (Div(8|16|32|64) x (Const(8|16|32|64) [c])) (Const(8|16|32|64) [c])))
+ && x.Op != OpConst64 && isPowerOfTwo(c) =>
+ (Neq(8|16|32|64) (And(8|16|32|64) x (Const(8|16|32|64) [c-1])) (Const(8|16|32|64) [0]))
+
+// Divisibility by unsigned.
+(Eq8 x (Mul8 div:(Div8u x (Const8 [c])) (Const8 [c])))
+ && div.Uses == 1
+ && x.Op != OpConst8 && udivisibleOK8(c) =>
+ (Leq8U
+ (RotateLeft8
+ (Mul8 x (Const8 [int8(udivisible8(c).m)]))
+ (Const8